//===-- SelectionDAGBuild.cpp - Selection-DAG building --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "isel"
15 #include "SelectionDAGBuild.h"
16 #include "llvm/ADT/BitVector.h"
17 #include "llvm/ADT/SmallSet.h"
18 #include "llvm/Analysis/AliasAnalysis.h"
19 #include "llvm/Constants.h"
20 #include "llvm/CallingConv.h"
21 #include "llvm/DerivedTypes.h"
22 #include "llvm/Function.h"
23 #include "llvm/GlobalVariable.h"
24 #include "llvm/InlineAsm.h"
25 #include "llvm/Instructions.h"
26 #include "llvm/Intrinsics.h"
27 #include "llvm/IntrinsicInst.h"
28 #include "llvm/Module.h"
29 #include "llvm/CodeGen/FastISel.h"
30 #include "llvm/CodeGen/GCStrategy.h"
31 #include "llvm/CodeGen/GCMetadata.h"
32 #include "llvm/CodeGen/MachineFunction.h"
33 #include "llvm/CodeGen/MachineFrameInfo.h"
34 #include "llvm/CodeGen/MachineInstrBuilder.h"
35 #include "llvm/CodeGen/MachineJumpTableInfo.h"
36 #include "llvm/CodeGen/MachineModuleInfo.h"
37 #include "llvm/CodeGen/MachineRegisterInfo.h"
38 #include "llvm/CodeGen/PseudoSourceValue.h"
39 #include "llvm/CodeGen/SelectionDAG.h"
40 #include "llvm/CodeGen/DwarfWriter.h"
41 #include "llvm/Analysis/DebugInfo.h"
42 #include "llvm/Target/TargetRegisterInfo.h"
43 #include "llvm/Target/TargetData.h"
44 #include "llvm/Target/TargetFrameInfo.h"
45 #include "llvm/Target/TargetInstrInfo.h"
46 #include "llvm/Target/TargetLowering.h"
47 #include "llvm/Target/TargetMachine.h"
48 #include "llvm/Target/TargetOptions.h"
49 #include "llvm/Support/Compiler.h"
50 #include "llvm/Support/CommandLine.h"
51 #include "llvm/Support/Debug.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Support/raw_ostream.h"
using namespace llvm;

/// LimitFloatPrecision - Generate low-precision inline sequences for
58 /// some float libcalls (6, 8 or 12 bits).
59 static unsigned LimitFloatPrecision;
61 static cl::opt<unsigned, true>
62 LimitFPPrecision("limit-float-precision",
63 cl::desc("Generate low-precision inline sequences "
64 "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
                 cl::init(0));
68 /// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
69 /// of insertvalue or extractvalue indices that identify a member, return
70 /// the linearized index of the start of the member.
72 static unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
73 const unsigned *Indices,
74 const unsigned *IndicesEnd,
75 unsigned CurIndex = 0) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
106 /// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
107 /// MVTs that represent all the individual underlying
108 /// non-aggregate types that comprise it.
110 /// If Offsets is non-null, it points to a vector to be filled in
111 /// with the in-memory offsets of each of the individual values.
113 static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
114 SmallVectorImpl<MVT> &ValueVTs,
115 SmallVectorImpl<uint64_t> *Offsets = 0,
116 uint64_t StartingOffset = 0) {
  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getTargetData()->getTypePaddedSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Base case: we can get an MVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
144 /// RegsForValue - This struct represents the registers (physical or virtual)
145 /// that a particular set of values is assigned, and the type information about
146 /// the value. The most common situation is to represent one value at a time,
147 /// but struct or array values are handled element-wise as multiple values.
148 /// The splitting of aggregates is performed recursively, so that we never
149 /// have aggregate-typed registers. The values at this point do not necessarily
/// have legal types, so each value may require one or more registers of some
/// legal type.
///
struct VISIBILITY_HIDDEN RegsForValue {
154 /// TLI - The TargetLowering object.
156 const TargetLowering *TLI;
158 /// ValueVTs - The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
161 SmallVector<MVT, 4> ValueVTs;
163 /// RegVTs - The value types of the registers. This is the same size as
164 /// ValueVTs and it records, for each value, what the type of the assigned
165 /// register or registers are. (Individual values are never synthesized
166 /// from more than one type of register.)
168 /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function; however, with physical registers
170 /// it is necessary to have a separate record of the types.
172 SmallVector<MVT, 4> RegVTs;
174 /// Regs - This list holds the registers assigned to the values.
175 /// Each legal or promoted value requires one register, and each
176 /// expanded value requires multiple registers.
178 SmallVector<unsigned, 4> Regs;
180 RegsForValue() : TLI(0) {}
182 RegsForValue(const TargetLowering &tli,
               const SmallVector<unsigned, 4> &regs,
184 MVT regvt, MVT valuevt)
185 : TLI(&tli), ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
186 RegsForValue(const TargetLowering &tli,
               const SmallVector<unsigned, 4> &regs,
               const SmallVector<MVT, 4> &regvts,
189 const SmallVector<MVT, 4> &valuevts)
190 : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
191 RegsForValue(const TargetLowering &tli,
192 unsigned Reg, const Type *Ty) : TLI(&tli) {
193 ComputeValueVTs(tli, Ty, ValueVTs);
195 for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
196 MVT ValueVT = ValueVTs[Value];
197 unsigned NumRegs = TLI->getNumRegisters(ValueVT);
198 MVT RegisterVT = TLI->getRegisterType(ValueVT);
199 for (unsigned i = 0; i != NumRegs; ++i)
200 Regs.push_back(Reg + i);
      RegVTs.push_back(RegisterVT);
      Reg += NumRegs;
    }
  }
206 /// append - Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    TLI = RHS.TLI;
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
  }
215 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
216 /// this value and returns the result as a ValueVTs value. This uses
217 /// Chain/Flag as the input and updates them for the output Chain/Flag.
218 /// If the Flag pointer is NULL, no flag is used.
219 SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
220 SDValue &Chain, SDValue *Flag) const;
222 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
223 /// specified value into the registers specified by this object. This uses
224 /// Chain/Flag as the input and updates them for the output Chain/Flag.
225 /// If the Flag pointer is NULL, no flag is used.
226 void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
227 SDValue &Chain, SDValue *Flag) const;
229 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
230 /// operand list. This adds the code marker and includes the number of
231 /// values added into it.
232 void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
                            std::vector<SDValue> &Ops) const;
};
237 /// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
238 /// PHI nodes or outside of the basic block that defines it, or used by a
239 /// switch or atomic instruction, which may expand to multiple basic blocks.
240 static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
241 if (isa<PHINode>(I)) return true;
242 BasicBlock *BB = I->getParent();
243 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
244 if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
245 // FIXME: Remove switchinst special case.
        isa<SwitchInst>(*UI))
      return true;
  return false;
}
251 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
252 /// entry block, return true. This includes arguments used by switches, since
253 /// the switch may expand into multiple basic blocks.
254 static bool isOnlyUsedInEntryBlock(Argument *A, bool EnableFastISel) {
255 // With FastISel active, we may be splitting blocks, so force creation
256 // of virtual registers for all non-dead arguments.
257 // Don't force virtual registers for byval arguments though, because
258 // fast-isel can't handle those in all cases.
259 if (EnableFastISel && !A->hasByValAttr())
260 return A->use_empty();
262 BasicBlock *Entry = A->getParent()->begin();
263 for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
264 if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false;  // Use not in entry block.
  return true;
}
FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli)
  : TLI(tli) {
}
void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
                               bool EnableFastISel) {
  Fn = &fn;
  MF = &mf;
  RegInfo = &MF->getRegInfo();
279 // Create a vreg for each argument register that is not dead and is used
280 // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
       AI != E; ++AI)
283 if (!isOnlyUsedInEntryBlock(AI, EnableFastISel))
284 InitializeRegForValue(AI);
286 // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
289 Function::iterator BB = Fn->begin(), EB = Fn->end();
290 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
291 if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
292 if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
293 const Type *Ty = AI->getAllocatedType();
294 uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
                   AI->getAlignment());
299 TySize *= CUI->getZExtValue(); // Get total allocated size.
300 if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
301 StaticAllocaMap[AI] =
          MF->getFrameInfo()->CreateStackObject(TySize, Align);
      }
305 for (; BB != EB; ++BB)
306 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
307 if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
308 if (!isa<AllocaInst>(I) ||
309 !StaticAllocaMap.count(cast<AllocaInst>(I)))
310 InitializeRegForValue(I);
312 // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
313 // also creates the initial PHI MachineInstrs, though none of the input
314 // operands are populated.
  for (BB = Fn->begin(), EB = Fn->end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF->push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin();(PN = dyn_cast<PHINode>(I)); ++I){
      if (PN->use_empty()) continue;

      unsigned PHIReg = ValueMap[PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<MVT, 4> ValueVTs;
      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        MVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI.getNumRegisters(VT);
        const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, TII->get(TargetInstrInfo::PHI), PHIReg+i);
        PHIReg += NumRegisters;
      }
    }
  }
}
343 unsigned FunctionLoweringInfo::MakeReg(MVT VT) {
  return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
}
347 /// CreateRegForValue - Allocate the appropriate number of virtual registers of
348 /// the correctly promoted or expanded types. Assign these registers
349 /// consecutive vreg numbers and return the first assigned number.
351 /// In the case that the given value has struct or array type, this function
352 /// will assign registers for each member or element.
354 unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
355 SmallVector<MVT, 4> ValueVTs;
356 ComputeValueVTs(TLI, V->getType(), ValueVTs);
358 unsigned FirstReg = 0;
359 for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
360 MVT ValueVT = ValueVTs[Value];
361 MVT RegisterVT = TLI.getRegisterType(ValueVT);
363 unsigned NumRegs = TLI.getNumRegisters(ValueVT);
364 for (unsigned i = 0; i != NumRegs; ++i) {
365 unsigned R = MakeReg(RegisterVT);
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}
372 /// getCopyFromParts - Create a value that contains the specified legal parts
373 /// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
375 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
376 /// (ISD::AssertSext).
377 static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
378 const SDValue *Parts,
379 unsigned NumParts, MVT PartVT, MVT ValueVT,
380 ISD::NodeType AssertOp = ISD::DELETED_NODE) {
381 assert(NumParts > 0 && "No parts to assemble!");
382 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (!ValueVT.isVector()) {
388 unsigned PartBits = PartVT.getSizeInBits();
389 unsigned ValueBits = ValueVT.getSizeInBits();
391 // Assemble the power of 2 part.
392 unsigned RoundParts = NumParts & (NumParts - 1) ?
393 1 << Log2_32(NumParts) : NumParts;
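      // RoundParts is NumParts rounded down to the nearest power of two; any
      // remaining "odd" parts are assembled separately and merged in below.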
394 unsigned RoundBits = PartBits * RoundParts;
395 MVT RoundVT = RoundBits == ValueBits ?
        ValueVT : MVT::getIntegerVT(RoundBits);
      SDValue Lo, Hi;

      MVT HalfVT = ValueVT.isInteger() ?
        MVT::getIntegerVT(RoundBits/2) :
        MVT::getFloatingPointVT(RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, dl, Parts, RoundParts/2, PartVT, HalfVT);
        Hi = getCopyFromParts(DAG, dl, Parts+RoundParts/2, RoundParts/2,
                              PartVT, HalfVT);
      } else {
        Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);
      }
      if (TLI.isBigEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);
415 if (RoundParts < NumParts) {
416 // Assemble the trailing non-power-of-2 part.
417 unsigned OddParts = NumParts - RoundParts;
418 MVT OddVT = MVT::getIntegerVT(OddParts * PartBits);
419 Hi = getCopyFromParts(DAG, dl,
420 Parts+RoundParts, OddParts, PartVT, OddVT);
        // Combine the round and odd parts.
        Lo = Val;
        if (TLI.isBigEndian())
          std::swap(Lo, Hi);
        MVT TotalVT = MVT::getIntegerVT(NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueType().getSizeInBits(),
                                         TLI.getShiftAmountTy()));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
      }
    } else {
435 // Handle a multi-element vector.
436 MVT IntermediateVT, RegisterVT;
437 unsigned NumIntermediates;
      unsigned NumRegs =
        TLI.getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
                                   RegisterVT);
441 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
442 NumParts = NumRegs; // Silence a compiler warning.
443 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
444 assert(RegisterVT == Parts[0].getValueType() &&
445 "Part type doesn't match part!");
447 // Assemble the parts into intermediate operands.
448 SmallVector<SDValue, 8> Ops(NumIntermediates);
449 if (NumIntermediates == NumParts) {
450 // If the register was not expanded, truncate or copy the value,
452 for (unsigned i = 0; i != NumParts; ++i)
453 Ops[i] = getCopyFromParts(DAG, dl, &Parts[i], 1,
454 PartVT, IntermediateVT);
455 } else if (NumParts > 0) {
456 // If the intermediate type was expanded, build the intermediate operands
458 assert(NumParts % NumIntermediates == 0 &&
459 "Must expand into a divisible number of parts!");
460 unsigned Factor = NumParts / NumIntermediates;
461 for (unsigned i = 0; i != NumIntermediates; ++i)
462 Ops[i] = getCopyFromParts(DAG, dl, &Parts[i * Factor], Factor,
463 PartVT, IntermediateVT);
      // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
      // intermediate operands.
      Val = DAG.getNode(IntermediateVT.isVector() ?
                        ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
                        ValueVT, &Ops[0], NumIntermediates);
    }
  }
474 // There is now one part, held in Val. Correct it to match ValueVT.
475 PartVT = Val.getValueType();
  if (PartVT == ValueVT)
    return Val;
480 if (PartVT.isVector()) {
481 assert(ValueVT.isVector() && "Unknown vector conversion!");
    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
  }
485 if (ValueVT.isVector()) {
486 assert(ValueVT.getVectorElementType() == PartVT &&
487 ValueVT.getVectorNumElements() == 1 &&
488 "Only trivial scalar-to-vector conversions should get here!");
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);
  }
492 if (PartVT.isInteger() &&
493 ValueVT.isInteger()) {
494 if (ValueVT.bitsLT(PartVT)) {
495 // For a truncate, see if we have any information to
496 // indicate whether the truncated bits will always be
497 // zero or sign-extension.
498 if (AssertOp != ISD::DELETED_NODE)
499 Val = DAG.getNode(AssertOp, dl, PartVT, Val,
500 DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
    } else {
      return DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);
    }
  }
507 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
508 if (ValueVT.bitsLT(Val.getValueType()))
509 // FP_ROUND's are always exact here.
510 return DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
511 DAG.getIntPtrConstant(1));
    return DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);
  }
515 if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
516 return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
  assert(0 && "Unknown mismatch!");
  return SDValue();
}
522 /// getCopyToParts - Create a series of nodes that contain the specified value
523 /// split into legal parts. If the parts contain more bits than Val, then, for
524 /// integers, ExtendKind can be used to specify how to generate the extra bits.
525 static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
526 SDValue *Parts, unsigned NumParts, MVT PartVT,
527 ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
528 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
529 MVT PtrVT = TLI.getPointerTy();
530 MVT ValueVT = Val.getValueType();
531 unsigned PartBits = PartVT.getSizeInBits();
532 assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
537 if (!ValueVT.isVector()) {
538 if (PartVT == ValueVT) {
      assert(NumParts == 1 && "No-op copy with multiple parts!");
      Parts[0] = Val;
      return;
    }
544 if (NumParts * PartBits > ValueVT.getSizeInBits()) {
545 // If the parts cover more bits than the value has, promote the value.
546 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
547 assert(NumParts == 1 && "Do not know what to promote to!");
548 Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
549 } else if (PartVT.isInteger() && ValueVT.isInteger()) {
550 ValueVT = MVT::getIntegerVT(NumParts * PartBits);
        Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
      } else {
        assert(0 && "Unknown mismatch!");
      }
555 } else if (PartBits == ValueVT.getSizeInBits()) {
556 // Different types of the same size.
557 assert(NumParts == 1 && PartVT != ValueVT);
558 Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
559 } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
      // If the parts cover fewer bits than the value has, truncate the value.
561 if (PartVT.isInteger() && ValueVT.isInteger()) {
562 ValueVT = MVT::getIntegerVT(NumParts * PartBits);
        Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
      } else {
        assert(0 && "Unknown mismatch!");
      }
    }
569 // The value may have changed - recompute ValueVT.
570 ValueVT = Val.getValueType();
571 assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
572 "Failed to tile the value with PartVT!");
575 assert(PartVT == ValueVT && "Type conversion failed!");
580 // Expand the value into multiple parts.
581 if (NumParts & (NumParts - 1)) {
582 // The number of parts is not a power of 2. Split off and copy the tail.
583 assert(PartVT.isInteger() && ValueVT.isInteger() &&
584 "Do not know what to expand to!");
585 unsigned RoundParts = 1 << Log2_32(NumParts);
586 unsigned RoundBits = RoundParts * PartBits;
587 unsigned OddParts = NumParts - RoundParts;
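    // The odd (non-power-of-two) parts hold the most-significant bits, so
    // shift them down into a separate value before copying them out.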
588 SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
589 DAG.getConstant(RoundBits,
590 TLI.getShiftAmountTy()));
591 getCopyToParts(DAG, dl, OddVal, Parts + RoundParts, OddParts, PartVT);
592 if (TLI.isBigEndian())
593 // The odd parts were reversed by getCopyToParts - unreverse them.
594 std::reverse(Parts + RoundParts, Parts + NumParts);
595 NumParts = RoundParts;
596 ValueVT = MVT::getIntegerVT(NumParts * PartBits);
      Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
    }

    // The number of parts is a power of 2.  Repeatedly bisect the value using
    // EXTRACT_ELEMENT.
602 Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
                           MVT::getIntegerVT(ValueVT.getSizeInBits()),
                           Val);
605 for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
606 for (unsigned i = 0; i < NumParts; i += StepSize) {
607 unsigned ThisBits = StepSize * PartBits / 2;
608 MVT ThisVT = MVT::getIntegerVT (ThisBits);
609 SDValue &Part0 = Parts[i];
610 SDValue &Part1 = Parts[i+StepSize/2];
        Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                            ThisVT, Part0,
                            DAG.getConstant(1, PtrVT));
        Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                            ThisVT, Part0,
                            DAG.getConstant(0, PtrVT));
        if (ThisBits == PartBits && ThisVT != PartVT) {
          Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
                              PartVT, Part0);
          Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,
                              PartVT, Part1);
        }
      }
    }

    if (TLI.isBigEndian())
      std::reverse(Parts, Parts + NumParts);

    return;
  }

  // Vector ValueVT.
  if (NumParts == 1) {
636 if (PartVT != ValueVT) {
637 if (PartVT.isVector()) {
        Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
      } else {
        assert(ValueVT.getVectorElementType() == PartVT &&
               ValueVT.getVectorNumElements() == 1 &&
               "Only trivial vector-to-scalar conversions should get here!");
        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                          PartVT, Val,
                          DAG.getConstant(0, PtrVT));
      }
    }

    Parts[0] = Val;
    return;
  }
653 // Handle a multi-element vector.
654 MVT IntermediateVT, RegisterVT;
655 unsigned NumIntermediates;
656 unsigned NumRegs = TLI
    .getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
                            RegisterVT);
659 unsigned NumElements = ValueVT.getVectorNumElements();
661 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
662 NumParts = NumRegs; // Silence a compiler warning.
663 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
665 // Split the vector into intermediate operands.
666 SmallVector<SDValue, 8> Ops(NumIntermediates);
667 for (unsigned i = 0; i != NumIntermediates; ++i)
668 if (IntermediateVT.isVector())
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
                           IntermediateVT, Val,
                           DAG.getConstant(i * (NumElements / NumIntermediates),
                                           PtrVT));
    else
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                           IntermediateVT, Val,
                           DAG.getConstant(i, PtrVT));
678 // Split the intermediate operands into legal parts.
679 if (NumParts == NumIntermediates) {
680 // If the register was not expanded, promote or copy the value,
682 for (unsigned i = 0; i != NumParts; ++i)
683 getCopyToParts(DAG, dl, Ops[i], &Parts[i], 1, PartVT);
684 } else if (NumParts > 0) {
    // If the intermediate type was expanded, split the value into
    // legal parts.
687 assert(NumParts % NumIntermediates == 0 &&
688 "Must expand into a divisible number of parts!");
689 unsigned Factor = NumParts / NumIntermediates;
690 for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, dl, Ops[i], &Parts[i * Factor], Factor, PartVT);
  }
}
void SelectionDAGLowering::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
  AA = &aa;
  GFI = gfi;
  TD = DAG.getTarget().getTargetData();
}
/// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGLowering object to be used
/// for a new block. This doesn't clear out information about
/// additional blocks that are needed to complete switch lowering
/// or PHI node updating; that information is cleared out as it is
/// consumed.
void SelectionDAGLowering::clear() {
  NodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  DAG.clear();
}
715 /// getRoot - Return the current virtual root of the Selection DAG,
716 /// flushing any PendingLoad items. This must be done before emitting
717 /// a store or any other node that may need to be ordered after any
718 /// prior load instructions.
720 SDValue SelectionDAGLowering::getRoot() {
721 if (PendingLoads.empty())
722 return DAG.getRoot();
724 if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }
731 // Otherwise, we have to make a token factor node.
732 SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
733 &PendingLoads[0], PendingLoads.size());
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}
739 /// getControlRoot - Similar to getRoot, but instead of flushing all the
740 /// PendingLoad items, flush all the PendingExports items. It is necessary
741 /// to do this before emitting a terminator instruction.
743 SDValue SelectionDAGLowering::getControlRoot() {
744 SDValue Root = DAG.getRoot();
  if (PendingExports.empty())
    return Root;
749 // Turn all of the CopyToReg chains into one factored node.
750 if (Root.getOpcode() != ISD::EntryToken) {
751 unsigned i = 0, e = PendingExports.size();
752 for (; i != e; ++i) {
753 assert(PendingExports[i].getNode()->getNumOperands() > 1);
754 if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }
762 Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
                     &PendingExports[0],
                     PendingExports.size());
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}
770 void SelectionDAGLowering::visit(Instruction &I) {
  visit(I.getOpcode(), I);
}
774 void SelectionDAGLowering::visit(unsigned Opcode, User &I) {
775 // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: assert(0 && "Unknown instruction type encountered!");
           abort();
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
  }
}
787 void SelectionDAGLowering::visitAdd(User &I) {
  if (I.getType()->isFPOrFPVector())
    visitBinary(I, ISD::FADD);
  else
    visitBinary(I, ISD::ADD);
}
794 void SelectionDAGLowering::visitMul(User &I) {
  if (I.getType()->isFPOrFPVector())
    visitBinary(I, ISD::FMUL);
  else
    visitBinary(I, ISD::MUL);
}
801 SDValue SelectionDAGLowering::getValue(const Value *V) {
802 SDValue &N = NodeMap[V];
803 if (N.getNode()) return N;
805 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
806 MVT VT = TLI.getValueType(V->getType(), true);
808 if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
809 return N = DAG.getConstant(*CI, VT);
811 if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
812 return N = DAG.getGlobalAddress(GV, VT);
814 if (isa<ConstantPointerNull>(C))
815 return N = DAG.getConstant(0, TLI.getPointerTy());
817 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
818 return N = DAG.getConstantFP(*CFP, VT);
820 if (isa<UndefValue>(C) && !isa<VectorType>(V->getType()) &&
821 !V->getType()->isAggregateType())
822 return N = DAG.getNode(ISD::UNDEF, getCurDebugLoc(), VT);
824 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
825 visit(CE->getOpcode(), *CE);
826 SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the ValueMap!");
      return N1;
    }
831 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
832 SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }
      return DAG.getMergeValues(&Constants[0], Constants.size());
    }
842 if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
843 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
844 "Unknown struct or array constant!");
846 SmallVector<MVT, 4> ValueVTs;
847 ComputeValueVTs(TLI, C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
851 SmallVector<SDValue, 4> Constants(NumElts);
852 for (unsigned i = 0; i != NumElts; ++i) {
853 MVT EltVT = ValueVTs[i];
854 if (isa<UndefValue>(C))
855 Constants[i] = DAG.getNode(ISD::UNDEF, getCurDebugLoc(), EltVT);
856 else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, EltVT);
        else
          Constants[i] = DAG.getConstant(0, EltVT);
      }
      return DAG.getMergeValues(&Constants[0], NumElts);
    }
864 const VectorType *VecTy = cast<VectorType>(V->getType());
865 unsigned NumElements = VecTy->getNumElements();
867 // Now that we know the number and type of the elements, get that number of
868 // elements into the Ops array based on what kind of constant it is.
869 SmallVector<SDValue, 16> Ops;
870 if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
871 for (unsigned i = 0; i != NumElements; ++i)
872 Ops.push_back(getValue(CP->getOperand(i)));
874 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
875 "Unknown vector constant!");
      MVT EltVT = TLI.getValueType(VecTy->getElementType());

      SDValue Op;
      if (isa<UndefValue>(C))
        Op = DAG.getNode(ISD::UNDEF, getCurDebugLoc(), EltVT);
      else if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, EltVT);
      else
        Op = DAG.getConstant(0, EltVT);
      Ops.assign(NumElements, Op);
    }
888 // Create a BUILD_VECTOR node.
889 return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
                                    VT, &Ops[0], Ops.size());
  }
893 // If this is a static alloca, generate it as the frameindex instead of
895 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
896 DenseMap<const AllocaInst*, int>::iterator SI =
897 FuncInfo.StaticAllocaMap.find(AI);
898 if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
  }
902 unsigned InReg = FuncInfo.ValueMap[V];
903 assert(InReg && "Value not in map!");
905 RegsForValue RFV(TLI, InReg, V->getType());
906 SDValue Chain = DAG.getEntryNode();
907 return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL);
911 void SelectionDAGLowering::visitRet(ReturnInst &I) {
912 if (I.getNumOperands() == 0) {
913 DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(),
                            MVT::Other, getControlRoot()));
    return;
  }
918 SmallVector<SDValue, 8> NewValues;
919 NewValues.push_back(getControlRoot());
920 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
921 SmallVector<MVT, 4> ValueVTs;
922 ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
923 unsigned NumValues = ValueVTs.size();
924 if (NumValues == 0) continue;
926 SDValue RetOp = getValue(I.getOperand(i));
927 for (unsigned j = 0, f = NumValues; j != f; ++j) {
928 MVT VT = ValueVTs[j];
930 // FIXME: C calling convention requires the return type to be promoted to
931 // at least 32-bit. But this is not necessary for non-C calling
933 if (VT.isInteger()) {
934 MVT MinVT = TLI.getRegisterType(MVT::i32);
        if (VT.bitsLT(MinVT))
          VT = MinVT;
      }
939 unsigned NumParts = TLI.getNumRegisters(VT);
940 MVT PartVT = TLI.getRegisterType(VT);
941 SmallVector<SDValue, 4> Parts(NumParts);
942 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
944 const Function *F = I.getParent()->getParent();
945 if (F->paramHasAttr(0, Attribute::SExt))
946 ExtendKind = ISD::SIGN_EXTEND;
947 else if (F->paramHasAttr(0, Attribute::ZExt))
948 ExtendKind = ISD::ZERO_EXTEND;
950 getCopyToParts(DAG, getCurDebugLoc(),
951 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
952 &Parts[0], NumParts, PartVT, ExtendKind);
954 // 'inreg' on function refers to return value
955 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
      if (F->paramHasAttr(0, Attribute::InReg))
        Flags.setInReg();
958 for (unsigned i = 0; i < NumParts; ++i) {
959 NewValues.push_back(Parts[i]);
        NewValues.push_back(DAG.getArgFlags(Flags));
      }
    }
  }
964 DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(), MVT::Other,
                          &NewValues[0], NewValues.size()));
}
968 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
969 /// the current basic block, add it to ValueMap now so that we'll get a
971 void SelectionDAGLowering::ExportFromCurrentBlock(Value *V) {
972 // No need to export constants.
973 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
976 if (FuncInfo.isExportedInst(V)) return;
978 unsigned Reg = FuncInfo.InitializeRegForValue(V);
  CopyValueToVirtualRegister(V, Reg);
}
982 bool SelectionDAGLowering::isExportableFromCurrentBlock(Value *V,
983 const BasicBlock *FromBB) {
984 // The operands of the setcc have to be in this block. We don't know
985 // how to export them from some other block.
986 if (Instruction *VI = dyn_cast<Instruction>(V)) {
987 // Can export from current BB.
    if (VI->getParent() == FromBB)
      return true;
991 // Is already exported, noop.
    return FuncInfo.isExportedInst(V);
  }
995 // If this is an argument, we can export it if the BB is the entry block or
996 // if it is already exported.
997 if (isa<Argument>(V)) {
    if (FromBB == &FromBB->getParent()->getEntryBlock())
      return true;
1001 // Otherwise, can only export this if it is already exported.
    return FuncInfo.isExportedInst(V);
  }
  // Otherwise, constants can always be exported.
  return true;
}
1009 static bool InBlock(const Value *V, const BasicBlock *BB) {
1010 if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}
1015 /// getFCmpCondCode - Return the ISD condition code corresponding to
1016 /// the given LLVM IR floating-point condition code. This includes
1017 /// consideration of global floating-point math flags.
1019 static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
  ISD::CondCode FPC, FOC;
  switch (Pred) {
1022 case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
1023 case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
1024 case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
1025 case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
1026 case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
1027 case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
1028 case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break;
1029 case FCmpInst::FCMP_ORD: FOC = FPC = ISD::SETO; break;
1030 case FCmpInst::FCMP_UNO: FOC = FPC = ISD::SETUO; break;
1031 case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
1032 case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
1033 case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
1034 case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break;
1035 case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break;
1036 case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
1037 case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break;
  default:
    assert(0 && "Invalid FCmp predicate opcode!");
    FOC = FPC = ISD::SETFALSE;
  }
  if (FiniteOnlyFPMath())
    return FOC;
  else
    return FPC;
}
1049 /// getICmpCondCode - Return the ISD condition code corresponding to
1050 /// the given LLVM IR integer condition code.
static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
1054 case ICmpInst::ICMP_EQ: return ISD::SETEQ;
1055 case ICmpInst::ICMP_NE: return ISD::SETNE;
1056 case ICmpInst::ICMP_SLE: return ISD::SETLE;
1057 case ICmpInst::ICMP_ULE: return ISD::SETULE;
1058 case ICmpInst::ICMP_SGE: return ISD::SETGE;
1059 case ICmpInst::ICMP_UGE: return ISD::SETUGE;
1060 case ICmpInst::ICMP_SLT: return ISD::SETLT;
1061 case ICmpInst::ICMP_ULT: return ISD::SETULT;
1062 case ICmpInst::ICMP_SGT: return ISD::SETGT;
1063 case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    assert(0 && "Invalid ICmp predicate opcode!");
    return ISD::SETNE;
  }
}
1070 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1071 /// This function emits a branch and is used at the leaves of an OR or an
1072 /// AND operator tree.
1075 SelectionDAGLowering::EmitBranchForMergedCondition(Value *Cond,
1076 MachineBasicBlock *TBB,
1077 MachineBasicBlock *FBB,
1078 MachineBasicBlock *CurBB) {
1079 const BasicBlock *BB = CurBB->getBasicBlock();
1081 // If the leaf of the tree is a comparison, merge the condition into
1083 if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1084 // The operands of the cmp have to be in this block. We don't know
1085 // how to export them from some other block. If this is the first block
1086 // of the sequence, no exporting is needed.
1087 if (CurBB == CurMBB ||
1088 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1089 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1090 ISD::CondCode Condition;
1091 if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1092 Condition = getICmpCondCode(IC->getPredicate());
1093 } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
1094 Condition = getFCmpCondCode(FC->getPredicate());
1096 Condition = ISD::SETEQ; // silence warning.
1097 assert(0 && "Unknown compare instruction");
1100 CaseBlock CB(Condition, BOp->getOperand(0),
1101 BOp->getOperand(1), NULL, TBB, FBB, CurBB);
      SwitchCases.push_back(CB);
      return;
    }
  }
1107 // Create a CaseBlock record representing this branch.
1108 CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(),
1109 NULL, TBB, FBB, CurBB);
  SwitchCases.push_back(CB);
}
1113 /// FindMergedConditions - If Cond is an expression like
1114 void SelectionDAGLowering::FindMergedConditions(Value *Cond,
1115 MachineBasicBlock *TBB,
1116 MachineBasicBlock *FBB,
                                                MachineBasicBlock *CurBB,
                                                unsigned Opc) {
1119 // If this node is not part of the or/and tree, emit it as a branch.
1120 Instruction *BOp = dyn_cast<Instruction>(Cond);
1121 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1122 (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
1123 BOp->getParent() != CurBB->getBasicBlock() ||
1124 !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1125 !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);
    return;
  }
1130 // Create TmpBB after CurBB.
1131 MachineFunction::iterator BBI = CurBB;
1132 MachineFunction &MF = DAG.getMachineFunction();
1133 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1134 CurBB->getParent()->insert(++BBI, TmpBB);
1136 if (Opc == Instruction::Or) {
1137 // Codegen X | Y as:
1145 // Emit the LHS condition.
1146 FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);
1148 // Emit the RHS condition into TmpBB.
1149 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
1152 // Codegen X & Y as:
1159 // This requires creation of TmpBB after CurBB.
1161 // Emit the LHS condition.
1162 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);
1164 // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  }
}
1169 /// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
1173 SelectionDAGLowering::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
1174 if (Cases.size() != 2) return true;
1176 // If this is two comparisons of the same values or'd or and'd together, they
1177 // will get folded into a single comparison, so don't emit two blocks.
1178 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1179 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1180 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  return true;
}
1188 void SelectionDAGLowering::visitBr(BranchInst &I) {
1189 // Update machine-CFG edges.
1190 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1192 // Figure out which block is immediately after the current one.
1193 MachineBasicBlock *NextBlock = 0;
1194 MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;
1198 if (I.isUnconditional()) {
1199 // Update machine-CFG edges.
1200 CurMBB->addSuccessor(Succ0MBB);
1202 // If this is not a fall-through branch, emit the branch.
1203 if (Succ0MBB != NextBlock)
1204 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
1205 MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(Succ0MBB)));
    return;
  }
1210 // If this condition is one of the special cases we handle, do special stuff
1212 Value *CondVal = I.getCondition();
1213 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1215 // If this is a series of conditions that are or'd or and'd together, emit
1216 // this as a sequence of branches instead of setcc's with and/or operations.
1217 // For example, instead of something like:
1230 if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1231 if (BOp->hasOneUse() &&
1232 (BOp->getOpcode() == Instruction::And ||
1233 BOp->getOpcode() == Instruction::Or)) {
1234 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
1235 // If the compares in later blocks need to use values not currently
1236 // exported from this block, export them now. This block should always
1237 // be the first entry.
1238 assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");
1240 // Allow some cases to be rejected.
1241 if (ShouldEmitAsBranches(SwitchCases)) {
1242 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1243 ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1244 ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1247 // Emit the branch for this block.
1248 visitSwitchCase(SwitchCases[0]);
        SwitchCases.erase(SwitchCases.begin());
        return;
      }
      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
        CurMBB->getParent()->erase(SwitchCases[i].ThisBB);

      SwitchCases.clear();
    }
  }
1262 // Create a CaseBlock record representing this branch.
1263 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(),
1264 NULL, Succ0MBB, Succ1MBB, CurMBB);
  // Use visitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  visitSwitchCase(CB);
}
1270 /// visitSwitchCase - Emits the necessary code to represent a single node in
1271 /// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
  SDValue Cond;
  SDValue CondLHS = getValue(CB.CmpLHS);
1276 // Build the setcc now.
1277 if (CB.CmpMHS == NULL) {
1278 // Fold "(X == true)" to X and "(X == false)" to !X to
1279 // handle common cases produced by branch lowering.
    if (CB.CmpRHS == ConstantInt::getTrue() && CB.CC == ISD::SETEQ)
      Cond = CondLHS;
1282 else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) {
1283 SDValue True = DAG.getConstant(1, CondLHS.getValueType());
1284 Cond = DAG.getNode(ISD::XOR, getCurDebugLoc(),
                         CondLHS.getValueType(), CondLHS, True);
    } else
      Cond = DAG.getSetCC(MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
  } else {
    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
1291 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1292 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
1294 SDValue CmpOp = getValue(CB.CmpMHS);
1295 MVT VT = CmpOp.getValueType();
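    // Emit the range check Low <= X <= High as a single unsigned comparison:
    // (X - Low) <= (High - Low). When Low is already the minimum signed value,
    // the subtraction is unnecessary and a plain signed compare suffices.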
1297 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Cond = DAG.getSetCC(MVT::i1, CmpOp, DAG.getConstant(High, VT), ISD::SETLE);
    } else {
1300 SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(),
1301 VT, CmpOp, DAG.getConstant(Low, VT));
1302 Cond = DAG.getSetCC(MVT::i1, SUB,
                          DAG.getConstant(High-Low, VT), ISD::SETULE);
    }
  }
1307 // Update successor info
1308 CurMBB->addSuccessor(CB.TrueBB);
1309 CurMBB->addSuccessor(CB.FalseBB);
1311 // Set NextBlock to be the MBB immediately after the current one, if any.
1312 // This is used to avoid emitting unnecessary branches to the next block.
1313 MachineBasicBlock *NextBlock = 0;
1314 MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;
1318 // If the lhs block is the next block, invert the condition so that we can
1319 // fall through to the lhs instead of the rhs block.
1320 if (CB.TrueBB == NextBlock) {
1321 std::swap(CB.TrueBB, CB.FalseBB);
1322 SDValue True = DAG.getConstant(1, Cond.getValueType());
1323 Cond = DAG.getNode(ISD::XOR, getCurDebugLoc(),
                       Cond.getValueType(), Cond, True);
  }
1326 SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1327 MVT::Other, getControlRoot(), Cond,
1328 DAG.getBasicBlock(CB.TrueBB));
1330 // If the branch was constant folded, fix up the CFG.
1331 if (BrCond.getOpcode() == ISD::BR) {
1332 CurMBB->removeSuccessor(CB.FalseBB);
    DAG.setRoot(BrCond);
  } else {
1335 // Otherwise, go ahead and insert the false branch.
1336 if (BrCond == getControlRoot())
1337 CurMBB->removeSuccessor(CB.TrueBB);
1339 if (CB.FalseBB == NextBlock)
1340 DAG.setRoot(BrCond);
1342 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
                              DAG.getBasicBlock(CB.FalseBB)));
  }
}
1347 /// visitJumpTable - Emit JumpTable node in the current MBB
1348 void SelectionDAGLowering::visitJumpTable(JumpTable &JT) {
1349 // Emit the code for the jump table
1350 assert(JT.Reg != -1U && "Should lower JT Header first!");
1351 MVT PTy = TLI.getPointerTy();
1352 SDValue Index = DAG.getCopyFromReg(getControlRoot(), JT.Reg, PTy);
1353 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1354 DAG.setRoot(DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
                          MVT::Other, Index.getValue(1),
                          Table, Index));
}
1359 /// visitJumpTableHeader - This function emits necessary code to produce index
1360 /// in the JumpTable from switch case.
1361 void SelectionDAGLowering::visitJumpTableHeader(JumpTable &JT,
1362 JumpTableHeader &JTH) {
1363 // Subtract the lowest switch case value from the value being switched on and
1364 // conditional branch to default mbb if the result is greater than the
1365 // difference between smallest and largest cases.
1366 SDValue SwitchOp = getValue(JTH.SValue);
1367 MVT VT = SwitchOp.getValueType();
1368 SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1369 DAG.getConstant(JTH.First, VT));
1371 // The SDNode we just created, which holds the value being switched on minus
  // the smallest case value, needs to be copied to a virtual register so it
1373 // can be used as an index into the jump table in a subsequent basic block.
1374 // This value may be smaller or larger than the target's pointer type, and
1375 // therefore require extension or truncating.
1376 if (VT.bitsGT(TLI.getPointerTy()))
1377 SwitchOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
                           TLI.getPointerTy(), SUB);
  else
    SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
1381 TLI.getPointerTy(), SUB);
1383 unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
1384 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), JumpTableReg, SwitchOp);
1385 JT.Reg = JumpTableReg;
1387 // Emit the range check for the jump table, and branch to the default block
1388 // for the switch statement if the value being switched on exceeds the largest
1389 // case in the switch.
1390 SDValue CMP = DAG.getSetCC(TLI.getSetCCResultType(SUB.getValueType()), SUB,
                             DAG.getConstant(JTH.Last-JTH.First,VT),
                             ISD::SETUGT);
1394 // Set NextBlock to be the MBB immediately after the current one, if any.
1395 // This is used to avoid emitting unnecessary branches to the next block.
1396 MachineBasicBlock *NextBlock = 0;
1397 MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;
1401 SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1402 MVT::Other, CopyTo, CMP,
1403 DAG.getBasicBlock(JT.Default));
1405 if (JT.MBB == NextBlock)
    DAG.setRoot(BrCond);
  else
1408 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
                            DAG.getBasicBlock(JT.MBB)));
}
1412 /// visitBitTestHeader - This function emits necessary code to produce value
1413 /// suitable for "bit tests"
1414 void SelectionDAGLowering::visitBitTestHeader(BitTestBlock &B) {
1415 // Subtract the minimum value
1416 SDValue SwitchOp = getValue(B.SValue);
1417 MVT VT = SwitchOp.getValueType();
1418 SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1419 DAG.getConstant(B.First, VT));
  // Check range
  SDValue RangeCmp = DAG.getSetCC(TLI.getSetCCResultType(SUB.getValueType()), SUB,
                                  DAG.getConstant(B.Range, VT),
                                  ISD::SETUGT);

  SDValue ShiftOp;
1427 if (VT.bitsGT(TLI.getShiftAmountTy()))
1428 ShiftOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
                          TLI.getShiftAmountTy(), SUB);
  else
    ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
1432 TLI.getShiftAmountTy(), SUB);
1434 B.Reg = FuncInfo.MakeReg(TLI.getShiftAmountTy());
1435 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), B.Reg, ShiftOp);
1437 // Set NextBlock to be the MBB immediately after the current one, if any.
1438 // This is used to avoid emitting unnecessary branches to the next block.
1439 MachineBasicBlock *NextBlock = 0;
1440 MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;
1444 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1446 CurMBB->addSuccessor(B.Default);
1447 CurMBB->addSuccessor(MBB);
1449 SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1450 MVT::Other, CopyTo, RangeCmp,
1451 DAG.getBasicBlock(B.Default));
1453 if (MBB == NextBlock)
    DAG.setRoot(BrRange);
  else
1456 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, CopyTo,
                            DAG.getBasicBlock(MBB)));
}
1460 /// visitBitTestCase - this function produces one "bit test"
void SelectionDAGLowering::visitBitTestCase(MachineBasicBlock* NextMBB,
                                            unsigned Reg,
                                            BitTestCase &B) {
1464 // Make desired shift
1465 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), Reg,
1466 TLI.getShiftAmountTy());
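  // Compute 1 << (switch value - lowest case value); testing this bit against
  // the block's case mask below decides whether to branch to the target.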
  SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
                                  TLI.getPointerTy(),
                                  DAG.getConstant(1, TLI.getPointerTy()),
                                  ShiftOp);
1472 // Emit bit tests and jumps
1473 SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
1474 TLI.getPointerTy(), SwitchVal,
1475 DAG.getConstant(B.Mask, TLI.getPointerTy()));
1476 SDValue AndCmp = DAG.getSetCC(TLI.getSetCCResultType(AndOp.getValueType()),
                                AndOp, DAG.getConstant(0, TLI.getPointerTy()),
                                ISD::SETNE);
1480 CurMBB->addSuccessor(B.TargetBB);
1481 CurMBB->addSuccessor(NextMBB);
1483 SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1484 MVT::Other, getControlRoot(),
1485 AndCmp, DAG.getBasicBlock(B.TargetBB));
1487 // Set NextBlock to be the MBB immediately after the current one, if any.
1488 // This is used to avoid emitting unnecessary branches to the next block.
1489 MachineBasicBlock *NextBlock = 0;
1490 MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;
  if (NextMBB == NextBlock)
    DAG.setRoot(BrAnd);
  else
1497 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
                            DAG.getBasicBlock(NextMBB)));
}
1501 void SelectionDAGLowering::visitInvoke(InvokeInst &I) {
1502 // Retrieve successors.
1503 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
1504 MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
1506 const Value *Callee(I.getCalledValue());
  if (isa<InlineAsm>(Callee))
    visitInlineAsm(&I);
  else
    LowerCallTo(&I, getValue(Callee), false, LandingPad);
1512 // If the value of the invoke is used outside of its defining block, make it
1513 // available as a virtual register.
1514 if (!I.use_empty()) {
1515 DenseMap<const Value*, unsigned>::iterator VMI = FuncInfo.ValueMap.find(&I);
1516 if (VMI != FuncInfo.ValueMap.end())
      CopyValueToVirtualRegister(&I, VMI->second);
  }
1520 // Update successor info
1521 CurMBB->addSuccessor(Return);
1522 CurMBB->addSuccessor(LandingPad);
1524 // Drop into normal successor.
1525 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
1526 MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}
void SelectionDAGLowering::visitUnwind(UnwindInst &I) {
}
1533 /// handleSmallSwitchCaseRange - Emit a series of specific tests (suitable for
1534 /// small case ranges).
1535 bool SelectionDAGLowering::handleSmallSwitchRange(CaseRec& CR,
1536 CaseRecVector& WorkList,
                                                  Value* SV,
                                                  MachineBasicBlock* Default) {
1539 Case& BackCase = *(CR.Range.second-1);
1541 // Size is the number of Cases represented by this range.
  size_t Size = CR.Range.second - CR.Range.first;
  if (Size > 3)
    return false;
1546 // Get the MachineFunction which holds the current MBB. This is used when
1547 // inserting any additional MBBs necessary to represent the switch.
1548 MachineFunction *CurMF = CurMBB->getParent();
1550 // Figure out which block is immediately after the current one.
1551 MachineBasicBlock *NextBlock = 0;
1552 MachineFunction::iterator BBI = CR.CaseBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;
1557 // TODO: If any two of the cases has the same destination, and if one value
1558 // is the same as the other, but has one bit unset that the other has set,
1559 // use bit manipulation to do two compares at once. For example:
1560 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
1562 // Rearrange the case blocks so that the last one falls through if possible.
1563 if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
1564 // The last case block won't fall through into 'NextBlock' if we emit the
1565 // branches in this order. See if rearranging a case value would help.
1566 for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
1567 if (I->BB == NextBlock) {
        std::swap(*I, BackCase);
        break;
      }
    }
  }
1574 // Create a CaseBlock record representing a conditional branch to
1575 // the Case's target mbb if the value being switched on SV is equal
1577 MachineBasicBlock *CurBlock = CR.CaseBB;
1578 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
    MachineBasicBlock *FallThrough;
    if (I != E-1) {
      FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
      CurMF->insert(BBI, FallThrough);
    } else {
1584 // If the last case doesn't match, go to the default block.
      FallThrough = Default;
    }
    Value *RHS, *LHS, *MHS;
    ISD::CondCode CC;
    if (I->High == I->Low) {
      // This is just a small case range containing exactly 1 case
      CC = ISD::SETEQ;
      LHS = SV; RHS = I->High; MHS = NULL;
    } else {
      CC = ISD::SETLE;
      LHS = I->Low; MHS = SV; RHS = I->High;
    }
1598 CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);
1600 // If emitting the first comparison, just call visitSwitchCase to emit the
1601 // code into the current block. Otherwise, push the CaseBlock onto the
1602 // vector to be later processed by SDISel, and insert the node's MBB
1603 // before the next MBB.
1604 if (CurBlock == CurMBB)
      visitSwitchCase(CB);
    else
      SwitchCases.push_back(CB);
    CurBlock = FallThrough;
  }

  return true;
}
1615 static inline bool areJTsAllowed(const TargetLowering &TLI) {
1616 return !DisableJumpTables &&
1617 (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
          TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
}
1621 static APInt ComputeRange(const APInt &First, const APInt &Last) {
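  // Sign-extend both endpoints by one extra bit so the subtraction below
  // cannot overflow, then return the inclusive size of the case range.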
1622 APInt LastExt(Last), FirstExt(First);
1623 uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
1624 LastExt.sext(BitWidth); FirstExt.sext(BitWidth);
  return (LastExt - FirstExt + 1ULL);
}
1628 /// handleJTSwitchCase - Emit jumptable for current switch case range
1629 bool SelectionDAGLowering::handleJTSwitchCase(CaseRec& CR,
1630 CaseRecVector& WorkList,
                                              Value* SV,
                                              MachineBasicBlock* Default) {
1633 Case& FrontCase = *CR.Range.first;
1634 Case& BackCase = *(CR.Range.second-1);
1636 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1637 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
  size_t TSize = 0;
  for (CaseItr I = CR.Range.first, E = CR.Range.second;
       I!=E; ++I)
    TSize += I->size();

  if (!areJTsAllowed(TLI) || TSize <= 3)
    return false;
1647 APInt Range = ComputeRange(First, Last);
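  // Only lower to a jump table when the cases fill a reasonable fraction of
  // the covered range; sparse tables waste space.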
  double Density = (double)TSize / Range.roundToDouble();
  if (Density < 0.4)
    return false;
1652 DEBUG(errs() << "Lowering jump table\n"
1653 << "First entry: " << First << ". Last entry: " << Last << '\n'
1654 << "Range: " << Range
1655 << "Size: " << TSize << ". Density: " << Density << "\n\n");
1657 // Get the MachineFunction which holds the current MBB. This is used when
1658 // inserting any additional MBBs necessary to represent the switch.
1659 MachineFunction *CurMF = CurMBB->getParent();
1661 // Figure out which block is immediately after the current one.
1662 MachineBasicBlock *NextBlock = 0;
1663 MachineFunction::iterator BBI = CR.CaseBB;
1665 if (++BBI != CurMBB->getParent()->end())
1668 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1670 // Create a new basic block to hold the code for loading the address
1671 // of the jump table, and jumping to it. Update successor information;
1672 // we will either branch to the default case for the switch, or the jump
1674 MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1675 CurMF->insert(BBI, JumpTableBB);
1676 CR.CaseBB->addSuccessor(Default);
1677 CR.CaseBB->addSuccessor(JumpTableBB);
1679 // Build a vector of destination BBs, corresponding to each target
1680 // of the jump table. If the value of the jump table slot corresponds to
1681 // a case statement, push the case's BB onto the vector, otherwise, push
1683 std::vector<MachineBasicBlock*> DestBBs;
1685 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
1686 const APInt& Low = cast<ConstantInt>(I->Low)->getValue();
1687 const APInt& High = cast<ConstantInt>(I->High)->getValue();
1689 if (Low.sle(TEI) && TEI.sle(High)) {
1690 DestBBs.push_back(I->BB);
1694 DestBBs.push_back(Default);
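  // For example, with cases 1 -> A, 2 -> A and 4 -> B (so First = 1, Last = 4),
  // DestBBs ends up as [A, A, Default, B].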
1698 // Update successor info. Add one edge to each unique successor.
1699 BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
1700 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1701 E = DestBBs.end(); I != E; ++I) {
1702 if (!SuccsHandled[(*I)->getNumber()]) {
1703 SuccsHandled[(*I)->getNumber()] = true;
1704 JumpTableBB->addSuccessor(*I);
1708 // Create a jump table index for this jump table, or return an existing
1710 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
1712 // Set the jump table information so that we can codegen it as a second
1713 // MachineBasicBlock
1714 JumpTable JT(-1U, JTI, JumpTableBB, Default);
1715 JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
1716 if (CR.CaseBB == CurMBB)
1717 visitJumpTableHeader(JT, JTH);
1719 JTCases.push_back(JumpTableBlock(JTH, JT));
1724 /// handleBTSplitSwitchCase - emit comparison and split binary search tree into
1726 bool SelectionDAGLowering::handleBTSplitSwitchCase(CaseRec& CR,
1727 CaseRecVector& WorkList,
1729 MachineBasicBlock* Default) {
1730 // Get the MachineFunction which holds the current MBB. This is used when
1731 // inserting any additional MBBs necessary to represent the switch.
1732 MachineFunction *CurMF = CurMBB->getParent();
1734 // Figure out which block is immediately after the current one.
1735 MachineBasicBlock *NextBlock = 0;
1736 MachineFunction::iterator BBI = CR.CaseBB;
1738 if (++BBI != CurMBB->getParent()->end())
1741 Case& FrontCase = *CR.Range.first;
1742 Case& BackCase = *(CR.Range.second-1);
1743 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1745 // Size is the number of Cases represented by this range.
1746 unsigned Size = CR.Range.second - CR.Range.first;
1748 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1749 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
1751 CaseItr Pivot = CR.Range.first + Size/2;
1753   // Select the optimal pivot, maximizing the summed density of LHS and RHS.
1754   // This will (heuristically) allow us to emit jump tables later.
1756 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1760 size_t LSize = FrontCase.size();
1761 size_t RSize = TSize-LSize;
1762 DEBUG(errs() << "Selecting best pivot: \n"
1763 << "First: " << First << ", Last: " << Last <<'\n'
1764 << "LSize: " << LSize << ", RSize: " << RSize << '\n');
1765 for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
1767 const APInt& LEnd = cast<ConstantInt>(I->High)->getValue();
1768 const APInt& RBegin = cast<ConstantInt>(J->Low)->getValue();
1769 APInt Range = ComputeRange(LEnd, RBegin);
1770 assert((Range - 2ULL).isNonNegative() &&
1771 "Invalid case distance");
1772 double LDensity = (double)LSize / (LEnd - First + 1ULL).roundToDouble();
1773 double RDensity = (double)RSize / (Last - RBegin + 1ULL).roundToDouble();
1774 double Metric = Range.logBase2()*(LDensity+RDensity);
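    // Illustrative example: for case values {0,1,2,3} and {100,101,102,103}, the
    // split between 3 and 100 has LDensity = RDensity = 1 and the widest gap,
    // scoring roughly log2(98) * 2 = 12, while any split inside either cluster
    // scores near 1. The pivot therefore lands in the gap and both halves stay
    // dense enough for jump tables.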
1775 // Should always split in some non-trivial place
1776 DEBUG(errs() <<"=>Step\n"
1777 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
1778 << "LDensity: " << LDensity
1779 << ", RDensity: " << RDensity << '\n'
1780 << "Metric: " << Metric << '\n');
1781 if (FMetric < Metric) {
1784 DEBUG(errs() << "Current metric set to: " << FMetric << '\n');
1790 if (areJTsAllowed(TLI)) {
1791 // If our case is dense we *really* should handle it earlier!
1792 assert((FMetric > 0) && "Should handle dense range earlier!");
1794 Pivot = CR.Range.first + Size/2;
1797 CaseRange LHSR(CR.Range.first, Pivot);
1798 CaseRange RHSR(Pivot, CR.Range.second);
1799 Constant *C = Pivot->Low;
1800 MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1802 // We know that we branch to the LHS if the Value being switched on is
1803 // less than the Pivot value, C. We use this to optimize our binary
1804 // tree a bit, by recognizing that if SV is greater than or equal to the
1805 // LHS's Case Value, and that Case Value is exactly one less than the
1806 // Pivot's Value, then we can branch directly to the LHS's Target,
1807 // rather than creating a leaf node for it.
1808 if ((LHSR.second - LHSR.first) == 1 &&
1809 LHSR.first->High == CR.GE &&
1810 cast<ConstantInt>(C)->getValue() ==
1811 (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
1812 TrueBB = LHSR.first->BB;
1814 TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1815 CurMF->insert(BBI, TrueBB);
1816 WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1819 // Similar to the optimization above, if the Value being switched on is
1820 // known to be less than the Constant CR.LT, and the current Case Value
1821 // is CR.LT - 1, then we can branch directly to the target block for
1822 // the current Case Value, rather than emitting a RHS leaf node for it.
1823 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1824 cast<ConstantInt>(RHSR.first->Low)->getValue() ==
1825 (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
1826 FalseBB = RHSR.first->BB;
1828 FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1829 CurMF->insert(BBI, FalseBB);
1830 WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
1833 // Create a CaseBlock record representing a conditional branch to
1834 // the LHS node if the value being switched on SV is less than C.
1835   // Otherwise, branch to the RHS node.
1836 CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
1838 if (CR.CaseBB == CurMBB)
1839 visitSwitchCase(CB);
1841 SwitchCases.push_back(CB);
1846 /// handleBitTestsSwitchCase - If the current case range has few destinations
1847 /// and the range spans less than the machine word bitwidth, encode the case
1848 /// range into a series of masks and emit bit tests with these masks.
1849 bool SelectionDAGLowering::handleBitTestsSwitchCase(CaseRec& CR,
1850 CaseRecVector& WorkList,
1852 MachineBasicBlock* Default){
1853 unsigned IntPtrBits = TLI.getPointerTy().getSizeInBits();
1855 Case& FrontCase = *CR.Range.first;
1856 Case& BackCase = *(CR.Range.second-1);
1858 // Get the MachineFunction which holds the current MBB. This is used when
1859 // inserting any additional MBBs necessary to represent the switch.
1860 MachineFunction *CurMF = CurMBB->getParent();
1863 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1865     // A single case counts as one comparison, a case range as two.
1866 numCmps += (I->Low == I->High ? 1 : 2);
1869 // Count unique destinations
1870 SmallSet<MachineBasicBlock*, 4> Dests;
1871 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1872 Dests.insert(I->BB);
1873 if (Dests.size() > 3)
1874       // Don't bother with the code below if there are too many unique destinations.
1877 DEBUG(errs() << "Total number of unique destinations: " << Dests.size() << '\n'
1878 << "Total number of comparisons: " << numCmps << '\n');
1880 // Compute span of values.
1881 const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
1882 const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
1883 APInt cmpRange = maxValue - minValue;
1885 DEBUG(errs() << "Compare range: " << cmpRange << '\n'
1886 << "Low bound: " << minValue << '\n'
1887 << "High bound: " << maxValue << '\n');
1889 if (cmpRange.uge(APInt(cmpRange.getBitWidth(), IntPtrBits)) ||
1890 (!(Dests.size() == 1 && numCmps >= 3) &&
1891 !(Dests.size() == 2 && numCmps >= 5) &&
1892 !(Dests.size() >= 3 && numCmps >= 6)))
1895 DEBUG(errs() << "Emitting bit tests\n");
1896 APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
1898 // Optimize the case where all the case values fit in a
1899 // word without having to subtract minValue. In this case,
1900 // we can optimize away the subtraction.
1901 if (minValue.isNonNegative() &&
1902 maxValue.slt(APInt(maxValue.getBitWidth(), IntPtrBits))) {
1903 cmpRange = maxValue;
1905 lowBound = minValue;
1908 CaseBitsVector CasesBits;
1909 unsigned i, count = 0;
1911 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1912 MachineBasicBlock* Dest = I->BB;
1913 for (i = 0; i < count; ++i)
1914 if (Dest == CasesBits[i].BB)
1918       assert((count < 3) && "Too many destinations to test!");
1919 CasesBits.push_back(CaseBits(0, Dest, 0));
1923 const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
1924 const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
1926 uint64_t lo = (lowValue - lowBound).getZExtValue();
1927 uint64_t hi = (highValue - lowBound).getZExtValue();
1929 for (uint64_t j = lo; j <= hi; j++) {
1930 CasesBits[i].Mask |= 1ULL << j;
1931 CasesBits[i].Bits++;
1935 std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
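  // Illustrative example (assuming a 32-bit pointer type): cases {0,2,4} -> A and
  // {5,6} -> B with lowBound = 0 produce Mask 0x15 (bits 0,2,4) for A and Mask
  // 0x60 (bits 5,6) for B, so each destination is reached with a single AND of
  // (1 << (SV - lowBound)) against its mask.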
1939 // Figure out which block is immediately after the current one.
1940 MachineFunction::iterator BBI = CR.CaseBB;
1943 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1945 DEBUG(errs() << "Cases:\n");
1946 for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
1947 DEBUG(errs() << "Mask: " << CasesBits[i].Mask
1948 << ", Bits: " << CasesBits[i].Bits
1949 << ", BB: " << CasesBits[i].BB << '\n');
1951 MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1952 CurMF->insert(BBI, CaseBB);
1953 BTC.push_back(BitTestCase(CasesBits[i].Mask,
1958 BitTestBlock BTB(lowBound, cmpRange, SV,
1959 -1U, (CR.CaseBB == CurMBB),
1960 CR.CaseBB, Default, BTC);
1962 if (CR.CaseBB == CurMBB)
1963 visitBitTestHeader(BTB);
1965 BitTestCases.push_back(BTB);
1971 /// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
1972 size_t SelectionDAGLowering::Clusterify(CaseVector& Cases,
1973 const SwitchInst& SI) {
1976 // Start with "simple" cases
1977 for (size_t i = 1; i < SI.getNumSuccessors(); ++i) {
1978 MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
1979 Cases.push_back(Case(SI.getSuccessorValue(i),
1980 SI.getSuccessorValue(i),
1983 std::sort(Cases.begin(), Cases.end(), CaseCmp());
1985 // Merge case into clusters
1986 if (Cases.size() >= 2)
1987 // Must recompute end() each iteration because it may be
1988 // invalidated by erase if we hold on to it
1989 for (CaseItr I = Cases.begin(), J = ++(Cases.begin()); J != Cases.end(); ) {
1990 const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
1991 const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
1992 MachineBasicBlock* nextBB = J->BB;
1993 MachineBasicBlock* currentBB = I->BB;
1995 // If the two neighboring cases go to the same destination, merge them
1996 // into a single case.
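      // For example, after sorting, cases 1 -> A, 2 -> A, 3 -> A collapse into the
      // single range [1, 3] -> A, while 5 -> A stays separate because 5 is not
      // adjacent to 3.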
1997 if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
2005 for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
2006 if (I->Low != I->High)
2007 // A range counts double, since it requires two compares.
2014 void SelectionDAGLowering::visitSwitch(SwitchInst &SI) {
2015 // Figure out which block is immediately after the current one.
2016 MachineBasicBlock *NextBlock = 0;
2017 MachineFunction::iterator BBI = CurMBB;
2019 MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
2021 // If there is only the default destination, branch to it if it is not the
2022 // next basic block. Otherwise, just fall through.
2023 if (SI.getNumOperands() == 2) {
2024 // Update machine-CFG edges.
2026 // If this is not a fall-through branch, emit the branch.
2027 CurMBB->addSuccessor(Default);
2028 if (Default != NextBlock)
2029 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
2030 MVT::Other, getControlRoot(),
2031 DAG.getBasicBlock(Default)));
2035 // If there are any non-default case statements, create a vector of Cases
2036 // representing each one, and sort the vector so that we can efficiently
2037 // create a binary search tree from them.
2039 size_t numCmps = Clusterify(Cases, SI);
2040 DEBUG(errs() << "Clusterify finished. Total clusters: " << Cases.size()
2041 << ". Total compares: " << numCmps << '\n');
2044 // Get the Value to be switched on and default basic blocks, which will be
2045 // inserted into CaseBlock records, representing basic blocks in the binary
2047 Value *SV = SI.getOperand(0);
2049 // Push the initial CaseRec onto the worklist
2050 CaseRecVector WorkList;
2051 WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
2053 while (!WorkList.empty()) {
2054 // Grab a record representing a case range to process off the worklist
2055 CaseRec CR = WorkList.back();
2056 WorkList.pop_back();
2058 if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
2061     // If the range has few cases (two or fewer), emit a series of specific
2063 if (handleSmallSwitchRange(CR, WorkList, SV, Default))
2066 // If the switch has more than 5 blocks, and at least 40% dense, and the
2067 // target supports indirect branches, then emit a jump table rather than
2068 // lowering the switch to a binary tree of conditional branches.
2069 if (handleJTSwitchCase(CR, WorkList, SV, Default))
2072 // Emit binary tree. We need to pick a pivot, and push left and right ranges
2073 // onto the worklist. Leafs are handled via handleSmallSwitchRange() call.
2074 handleBTSplitSwitchCase(CR, WorkList, SV, Default);
2079 void SelectionDAGLowering::visitSub(User &I) {
2080 // -0.0 - X --> fneg
2081 const Type *Ty = I.getType();
2082 if (isa<VectorType>(Ty)) {
2083 if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
2084 const VectorType *DestTy = cast<VectorType>(I.getType());
2085 const Type *ElTy = DestTy->getElementType();
2086 if (ElTy->isFloatingPoint()) {
2087 unsigned VL = DestTy->getNumElements();
2088 std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
2089 Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
2091 SDValue Op2 = getValue(I.getOperand(1));
2092 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2093 Op2.getValueType(), Op2));
2099 if (Ty->isFloatingPoint()) {
2100 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
2101 if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
2102 SDValue Op2 = getValue(I.getOperand(1));
2103 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2104 Op2.getValueType(), Op2));
2109 visitBinary(I, Ty->isFPOrFPVector() ? ISD::FSUB : ISD::SUB);
2112 void SelectionDAGLowering::visitBinary(User &I, unsigned OpCode) {
2113 SDValue Op1 = getValue(I.getOperand(0));
2114 SDValue Op2 = getValue(I.getOperand(1));
2116 setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
2117 Op1.getValueType(), Op1, Op2));
2120 void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
2121 SDValue Op1 = getValue(I.getOperand(0));
2122 SDValue Op2 = getValue(I.getOperand(1));
2123 if (!isa<VectorType>(I.getType())) {
2124 if (TLI.getShiftAmountTy().bitsLT(Op2.getValueType()))
2125 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2126 TLI.getShiftAmountTy(), Op2);
2127 else if (TLI.getShiftAmountTy().bitsGT(Op2.getValueType()))
2128 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2129 TLI.getShiftAmountTy(), Op2);
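    // For example, on a target whose shift amount type is i8 (an illustrative
    // assumption), an i32 shift amount from the IR is truncated here, while an
    // i1 amount would be any-extended.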
2132 setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
2133 Op1.getValueType(), Op1, Op2));
2136 void SelectionDAGLowering::visitICmp(User &I) {
2137 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2138 if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2139 predicate = IC->getPredicate();
2140 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2141 predicate = ICmpInst::Predicate(IC->getPredicate());
2142 SDValue Op1 = getValue(I.getOperand(0));
2143 SDValue Op2 = getValue(I.getOperand(1));
2144 ISD::CondCode Opcode = getICmpCondCode(predicate);
2145 setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
2148 void SelectionDAGLowering::visitFCmp(User &I) {
2149 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2150 if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2151 predicate = FC->getPredicate();
2152 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2153 predicate = FCmpInst::Predicate(FC->getPredicate());
2154 SDValue Op1 = getValue(I.getOperand(0));
2155 SDValue Op2 = getValue(I.getOperand(1));
2156 ISD::CondCode Condition = getFCmpCondCode(predicate);
2157 setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Condition));
2160 void SelectionDAGLowering::visitVICmp(User &I) {
2161 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2162 if (VICmpInst *IC = dyn_cast<VICmpInst>(&I))
2163 predicate = IC->getPredicate();
2164 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2165 predicate = ICmpInst::Predicate(IC->getPredicate());
2166 SDValue Op1 = getValue(I.getOperand(0));
2167 SDValue Op2 = getValue(I.getOperand(1));
2168 ISD::CondCode Opcode = getICmpCondCode(predicate);
2169 setValue(&I, DAG.getVSetCC(Op1.getValueType(), Op1, Op2, Opcode));
2172 void SelectionDAGLowering::visitVFCmp(User &I) {
2173 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2174 if (VFCmpInst *FC = dyn_cast<VFCmpInst>(&I))
2175 predicate = FC->getPredicate();
2176 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2177 predicate = FCmpInst::Predicate(FC->getPredicate());
2178 SDValue Op1 = getValue(I.getOperand(0));
2179 SDValue Op2 = getValue(I.getOperand(1));
2180 ISD::CondCode Condition = getFCmpCondCode(predicate);
2181 MVT DestVT = TLI.getValueType(I.getType());
2183 setValue(&I, DAG.getVSetCC(DestVT, Op1, Op2, Condition));
2186 void SelectionDAGLowering::visitSelect(User &I) {
2187 SmallVector<MVT, 4> ValueVTs;
2188 ComputeValueVTs(TLI, I.getType(), ValueVTs);
2189 unsigned NumValues = ValueVTs.size();
2190 if (NumValues != 0) {
2191 SmallVector<SDValue, 4> Values(NumValues);
2192 SDValue Cond = getValue(I.getOperand(0));
2193 SDValue TrueVal = getValue(I.getOperand(1));
2194 SDValue FalseVal = getValue(I.getOperand(2));
2196 for (unsigned i = 0; i != NumValues; ++i)
2197 Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
2198 TrueVal.getValueType(), Cond,
2199 SDValue(TrueVal.getNode(), TrueVal.getResNo() + i),
2200 SDValue(FalseVal.getNode(), FalseVal.getResNo() + i));
2202 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2203 DAG.getVTList(&ValueVTs[0], NumValues),
2204 &Values[0], NumValues));
2209 void SelectionDAGLowering::visitTrunc(User &I) {
2210 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2211 SDValue N = getValue(I.getOperand(0));
2212 MVT DestVT = TLI.getValueType(I.getType());
2213 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2216 void SelectionDAGLowering::visitZExt(User &I) {
2217 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2218   // ZExt also can't be a cast to bool for the same reason, so there's nothing much to do
2219 SDValue N = getValue(I.getOperand(0));
2220 MVT DestVT = TLI.getValueType(I.getType());
2221 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
2224 void SelectionDAGLowering::visitSExt(User &I) {
2225 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2226   // SExt also can't be a cast to bool for the same reason, so there's nothing much to do
2227 SDValue N = getValue(I.getOperand(0));
2228 MVT DestVT = TLI.getValueType(I.getType());
2229 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
2232 void SelectionDAGLowering::visitFPTrunc(User &I) {
2233 // FPTrunc is never a no-op cast, no need to check
2234 SDValue N = getValue(I.getOperand(0));
2235 MVT DestVT = TLI.getValueType(I.getType());
2236 setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
2237 DestVT, N, DAG.getIntPtrConstant(0)));
2240 void SelectionDAGLowering::visitFPExt(User &I){
2241   // FPExt is never a no-op cast, no need to check
2242 SDValue N = getValue(I.getOperand(0));
2243 MVT DestVT = TLI.getValueType(I.getType());
2244 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
2247 void SelectionDAGLowering::visitFPToUI(User &I) {
2248 // FPToUI is never a no-op cast, no need to check
2249 SDValue N = getValue(I.getOperand(0));
2250 MVT DestVT = TLI.getValueType(I.getType());
2251 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
2254 void SelectionDAGLowering::visitFPToSI(User &I) {
2255 // FPToSI is never a no-op cast, no need to check
2256 SDValue N = getValue(I.getOperand(0));
2257 MVT DestVT = TLI.getValueType(I.getType());
2258 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
2261 void SelectionDAGLowering::visitUIToFP(User &I) {
2262 // UIToFP is never a no-op cast, no need to check
2263 SDValue N = getValue(I.getOperand(0));
2264 MVT DestVT = TLI.getValueType(I.getType());
2265 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
2268 void SelectionDAGLowering::visitSIToFP(User &I){
2269 // SIToFP is never a no-op cast, no need to check
2270 SDValue N = getValue(I.getOperand(0));
2271 MVT DestVT = TLI.getValueType(I.getType());
2272 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
2275 void SelectionDAGLowering::visitPtrToInt(User &I) {
2276 // What to do depends on the size of the integer and the size of the pointer.
2277 // We can either truncate, zero extend, or no-op, accordingly.
2278 SDValue N = getValue(I.getOperand(0));
2279 MVT SrcVT = N.getValueType();
2280 MVT DestVT = TLI.getValueType(I.getType());
2282 if (DestVT.bitsLT(SrcVT))
2283 Result = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N);
2285 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2286 Result = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N);
2287 setValue(&I, Result);
2290 void SelectionDAGLowering::visitIntToPtr(User &I) {
2291 // What to do depends on the size of the integer and the size of the pointer.
2292 // We can either truncate, zero extend, or no-op, accordingly.
2293 SDValue N = getValue(I.getOperand(0));
2294 MVT SrcVT = N.getValueType();
2295 MVT DestVT = TLI.getValueType(I.getType());
2296 if (DestVT.bitsLT(SrcVT))
2297 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2299 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2300 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2304 void SelectionDAGLowering::visitBitCast(User &I) {
2305 SDValue N = getValue(I.getOperand(0));
2306 MVT DestVT = TLI.getValueType(I.getType());
2308 // BitCast assures us that source and destination are the same size so this
2309 // is either a BIT_CONVERT or a no-op.
2310 if (DestVT != N.getValueType())
2311 setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
2312 DestVT, N)); // convert types
2314 setValue(&I, N); // noop cast.
2317 void SelectionDAGLowering::visitInsertElement(User &I) {
2318 SDValue InVec = getValue(I.getOperand(0));
2319 SDValue InVal = getValue(I.getOperand(1));
2320 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2322 getValue(I.getOperand(2)));
2324 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
2325 TLI.getValueType(I.getType()),
2326 InVec, InVal, InIdx));
2329 void SelectionDAGLowering::visitExtractElement(User &I) {
2330 SDValue InVec = getValue(I.getOperand(0));
2331 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2333 getValue(I.getOperand(1)));
2334 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2335 TLI.getValueType(I.getType()), InVec, InIdx));
2339 // Utility for visitShuffleVector - Returns true if the mask is a sequential
2340 // mask starting from SIndx and increasing to the element length (undefs are allowed).
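// For example, the 4-element mask <0, 1, undef, 3> is sequential from SIndx = 0,
// and <4, 5, 6, 7> is sequential from SIndx = 4.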
2341 static bool SequentialMask(SDValue Mask, unsigned SIndx) {
2342 unsigned MaskNumElts = Mask.getNumOperands();
2343 for (unsigned i = 0; i != MaskNumElts; ++i) {
2344 if (Mask.getOperand(i).getOpcode() != ISD::UNDEF) {
2345 unsigned Idx = cast<ConstantSDNode>(Mask.getOperand(i))->getZExtValue();
2346 if (Idx != i + SIndx)
2353 void SelectionDAGLowering::visitShuffleVector(User &I) {
2354 SDValue Src1 = getValue(I.getOperand(0));
2355 SDValue Src2 = getValue(I.getOperand(1));
2356 SDValue Mask = getValue(I.getOperand(2));
2358 MVT VT = TLI.getValueType(I.getType());
2359 MVT SrcVT = Src1.getValueType();
2360 int MaskNumElts = Mask.getNumOperands();
2361 int SrcNumElts = SrcVT.getVectorNumElements();
2363 if (SrcNumElts == MaskNumElts) {
2364 setValue(&I, DAG.getNode(ISD::VECTOR_SHUFFLE, getCurDebugLoc(),
2365 VT, Src1, Src2, Mask));
2369 // Normalize the shuffle vector since mask and vector length don't match.
2370 MVT MaskEltVT = Mask.getValueType().getVectorElementType();
2372 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
2373   // The mask is longer than the source vectors and its length is a multiple of
2374   // the source vector length. We can use CONCAT_VECTORS to make the mask and vectors
2376 if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
2377 // The shuffle is concatenating two vectors together.
2378 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
2383 // Pad both vectors with undefs to make them the same length as the mask.
2384 unsigned NumConcat = MaskNumElts / SrcNumElts;
2385 SDValue UndefVal = DAG.getNode(ISD::UNDEF, getCurDebugLoc(), SrcVT);
2387 SDValue* MOps1 = new SDValue[NumConcat];
2388 SDValue* MOps2 = new SDValue[NumConcat];
2391 for (unsigned i = 1; i != NumConcat; ++i) {
2392 MOps1[i] = UndefVal;
2393 MOps2[i] = UndefVal;
2395 Src1 = DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
2396 VT, MOps1, NumConcat);
2397 Src2 = DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
2398 VT, MOps2, NumConcat);
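    // Illustrative example: shuffling two <4 x i32> sources under an 8-element mask
    // pads each source with one undef <4 x i32> here; mask indices that referred to
    // Src2 (values >= 4) are remapped below by adding MaskNumElts - SrcNumElts = 4.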
2403 // Readjust mask for new input vector length.
2404 SmallVector<SDValue, 8> MappedOps;
2405 for (int i = 0; i != MaskNumElts; ++i) {
2406 if (Mask.getOperand(i).getOpcode() == ISD::UNDEF) {
2407 MappedOps.push_back(Mask.getOperand(i));
2409 int Idx = cast<ConstantSDNode>(Mask.getOperand(i))->getZExtValue();
2410 if (Idx < SrcNumElts)
2411 MappedOps.push_back(DAG.getConstant(Idx, MaskEltVT));
2413 MappedOps.push_back(DAG.getConstant(Idx + MaskNumElts - SrcNumElts,
2417 Mask = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
2418 Mask.getValueType(),
2419 &MappedOps[0], MappedOps.size());
2421 setValue(&I, DAG.getNode(ISD::VECTOR_SHUFFLE, getCurDebugLoc(),
2422 VT, Src1, Src2, Mask));
2426 if (SrcNumElts > MaskNumElts) {
2427 // Resulting vector is shorter than the incoming vector.
2428 if (SrcNumElts == MaskNumElts && SequentialMask(Mask,0)) {
2429 // Shuffle extracts 1st vector.
2434 if (SrcNumElts == MaskNumElts && SequentialMask(Mask,MaskNumElts)) {
2435 // Shuffle extracts 2nd vector.
2440 // Analyze the access pattern of the vector to see if we can extract
2441 // two subvectors and do the shuffle. The analysis is done by calculating
2442   // the range of elements the mask accesses on both vectors.
2443 int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
2444 int MaxRange[2] = {-1, -1};
2446 for (int i = 0; i != MaskNumElts; ++i) {
2447 SDValue Arg = Mask.getOperand(i);
2448 if (Arg.getOpcode() != ISD::UNDEF) {
2449 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2450 int Idx = cast<ConstantSDNode>(Arg)->getZExtValue();
2452 if (Idx >= SrcNumElts) {
2456 if (Idx > MaxRange[Input])
2457 MaxRange[Input] = Idx;
2458 if (Idx < MinRange[Input])
2459 MinRange[Input] = Idx;
2463   // Check if the access is smaller than the vector size and whether we can find
2464   // a reasonable extract index.
2465 int RangeUse[2] = { 2, 2 }; // 0 = Unused, 1 = Extract, 2 = Can not Extract.
2466 int StartIdx[2]; // StartIdx to extract from
2467 for (int Input=0; Input < 2; ++Input) {
2468 if (MinRange[Input] == SrcNumElts+1 && MaxRange[Input] == -1) {
2469 RangeUse[Input] = 0; // Unused
2470 StartIdx[Input] = 0;
2471 } else if (MaxRange[Input] - MinRange[Input] < MaskNumElts) {
2472 // Fits within range but we should see if we can find a good
2473 // start index that is a multiple of the mask length.
2474 if (MaxRange[Input] < MaskNumElts) {
2475 RangeUse[Input] = 1; // Extract from beginning of the vector
2476 StartIdx[Input] = 0;
2478 StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2479 if (MaxRange[Input] - StartIdx[Input] < MaskNumElts &&
2480 StartIdx[Input] + MaskNumElts < SrcNumElts)
2481 RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2486   if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2487 setValue(&I, DAG.getNode(ISD::UNDEF,
2488 getCurDebugLoc(), VT)); // Vectors are not used.
2491 else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
2492 // Extract appropriate subvector and generate a vector shuffle
2493 for (int Input=0; Input < 2; ++Input) {
2494 SDValue& Src = Input == 0 ? Src1 : Src2;
2495 if (RangeUse[Input] == 0) {
2496 Src = DAG.getNode(ISD::UNDEF, getCurDebugLoc(), VT);
2498 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
2499 Src, DAG.getIntPtrConstant(StartIdx[Input]));
2502 // Calculate new mask.
2503 SmallVector<SDValue, 8> MappedOps;
2504 for (int i = 0; i != MaskNumElts; ++i) {
2505 SDValue Arg = Mask.getOperand(i);
2506 if (Arg.getOpcode() == ISD::UNDEF) {
2507 MappedOps.push_back(Arg);
2509 int Idx = cast<ConstantSDNode>(Arg)->getZExtValue();
2510 if (Idx < SrcNumElts)
2511 MappedOps.push_back(DAG.getConstant(Idx - StartIdx[0], MaskEltVT));
2513 Idx = Idx - SrcNumElts - StartIdx[1] + MaskNumElts;
2514 MappedOps.push_back(DAG.getConstant(Idx, MaskEltVT));
2518 Mask = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
2519 Mask.getValueType(),
2520 &MappedOps[0], MappedOps.size());
2521 setValue(&I, DAG.getNode(ISD::VECTOR_SHUFFLE, getCurDebugLoc(),
2522 VT, Src1, Src2, Mask));
2527   // We can't use either concat vectors or extract subvectors, so fall back to
2528   // replacing the shuffle with extract and build vector.
2530 MVT EltVT = VT.getVectorElementType();
2531 MVT PtrVT = TLI.getPointerTy();
2532 SmallVector<SDValue,8> Ops;
2533 for (int i = 0; i != MaskNumElts; ++i) {
2534 SDValue Arg = Mask.getOperand(i);
2535 if (Arg.getOpcode() == ISD::UNDEF) {
2536 Ops.push_back(DAG.getNode(ISD::UNDEF, getCurDebugLoc(), EltVT));
2538 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2539 int Idx = cast<ConstantSDNode>(Arg)->getZExtValue();
2540 if (Idx < SrcNumElts)
2541 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2542 EltVT, Src1, DAG.getConstant(Idx, PtrVT)));
2544 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2546 DAG.getConstant(Idx - SrcNumElts, PtrVT)));
2549 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
2550 VT, &Ops[0], Ops.size()));
2553 void SelectionDAGLowering::visitInsertValue(InsertValueInst &I) {
2554 const Value *Op0 = I.getOperand(0);
2555 const Value *Op1 = I.getOperand(1);
2556 const Type *AggTy = I.getType();
2557 const Type *ValTy = Op1->getType();
2558 bool IntoUndef = isa<UndefValue>(Op0);
2559 bool FromUndef = isa<UndefValue>(Op1);
2561 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2562 I.idx_begin(), I.idx_end());
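  // For example (illustrative), inserting at indices {1, 1} of { i32, { float, double } }
  // gives LinearIndex = 2, since the aggregate flattens to [i32, float, double].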
2564 SmallVector<MVT, 4> AggValueVTs;
2565 ComputeValueVTs(TLI, AggTy, AggValueVTs);
2566 SmallVector<MVT, 4> ValValueVTs;
2567 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2569 unsigned NumAggValues = AggValueVTs.size();
2570 unsigned NumValValues = ValValueVTs.size();
2571 SmallVector<SDValue, 4> Values(NumAggValues);
2573 SDValue Agg = getValue(Op0);
2574 SDValue Val = getValue(Op1);
2576 // Copy the beginning value(s) from the original aggregate.
2577 for (; i != LinearIndex; ++i)
2578 Values[i] = IntoUndef ? DAG.getNode(ISD::UNDEF, getCurDebugLoc(),
2580 SDValue(Agg.getNode(), Agg.getResNo() + i);
2581 // Copy values from the inserted value(s).
2582 for (; i != LinearIndex + NumValValues; ++i)
2583 Values[i] = FromUndef ? DAG.getNode(ISD::UNDEF, getCurDebugLoc(),
2585 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2586 // Copy remaining value(s) from the original aggregate.
2587 for (; i != NumAggValues; ++i)
2588 Values[i] = IntoUndef ? DAG.getNode(ISD::UNDEF, getCurDebugLoc(),
2590 SDValue(Agg.getNode(), Agg.getResNo() + i);
2592 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2593 DAG.getVTList(&AggValueVTs[0], NumAggValues),
2594 &Values[0], NumAggValues));
2597 void SelectionDAGLowering::visitExtractValue(ExtractValueInst &I) {
2598 const Value *Op0 = I.getOperand(0);
2599 const Type *AggTy = Op0->getType();
2600 const Type *ValTy = I.getType();
2601 bool OutOfUndef = isa<UndefValue>(Op0);
2603 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2604 I.idx_begin(), I.idx_end());
2606 SmallVector<MVT, 4> ValValueVTs;
2607 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2609 unsigned NumValValues = ValValueVTs.size();
2610 SmallVector<SDValue, 4> Values(NumValValues);
2612 SDValue Agg = getValue(Op0);
2613 // Copy out the selected value(s).
2614 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2615 Values[i - LinearIndex] =
2617 DAG.getNode(ISD::UNDEF, getCurDebugLoc(),
2618 Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2619 SDValue(Agg.getNode(), Agg.getResNo() + i);
2621 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2622 DAG.getVTList(&ValValueVTs[0], NumValValues),
2623 &Values[0], NumValValues));
2627 void SelectionDAGLowering::visitGetElementPtr(User &I) {
2628 SDValue N = getValue(I.getOperand(0));
2629 const Type *Ty = I.getOperand(0)->getType();
2631 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
2634 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2635 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
2638 uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
2639 N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2640 DAG.getIntPtrConstant(Offset));
2642 Ty = StTy->getElementType(Field);
2644 Ty = cast<SequentialType>(Ty)->getElementType();
2646 // If this is a constant subscript, handle it quickly.
2647 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2648 if (CI->getZExtValue() == 0) continue;
2650 TD->getTypePaddedSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
2651 N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2652 DAG.getIntPtrConstant(Offs));
2656 // N = N + Idx * ElementSize;
2657 uint64_t ElementSize = TD->getTypePaddedSize(Ty);
2658 SDValue IdxN = getValue(Idx);
2660 // If the index is smaller or larger than intptr_t, truncate or extend
2662 if (IdxN.getValueType().bitsLT(N.getValueType()))
2663 IdxN = DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(),
2664 N.getValueType(), IdxN);
2665 else if (IdxN.getValueType().bitsGT(N.getValueType()))
2666 IdxN = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2667 N.getValueType(), IdxN);
2669 // If this is a multiply by a power of two, turn it into a shl
2670 // immediately. This is a very common case.
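      // For example, indexing an array of i64 (ElementSize = 8) becomes IdxN << 3.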
2671 if (ElementSize != 1) {
2672 if (isPowerOf2_64(ElementSize)) {
2673 unsigned Amt = Log2_64(ElementSize);
2674 IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
2675 N.getValueType(), IdxN,
2676 DAG.getConstant(Amt, TLI.getShiftAmountTy()));
2678 SDValue Scale = DAG.getIntPtrConstant(ElementSize);
2679 IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
2680 N.getValueType(), IdxN, Scale);
2684 N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2685 N.getValueType(), N, IdxN);
2691 void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
2692 // If this is a fixed sized alloca in the entry block of the function,
2693 // allocate it statically on the stack.
2694 if (FuncInfo.StaticAllocaMap.count(&I))
2695 return; // getValue will auto-populate this.
2697 const Type *Ty = I.getAllocatedType();
2698 uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
2700 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
2703 SDValue AllocSize = getValue(I.getArraySize());
2704 MVT IntPtr = TLI.getPointerTy();
2705 if (IntPtr.bitsLT(AllocSize.getValueType()))
2706 AllocSize = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2708 else if (IntPtr.bitsGT(AllocSize.getValueType()))
2709 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2712 AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), IntPtr, AllocSize,
2713 DAG.getIntPtrConstant(TySize));
2715 // Handle alignment. If the requested alignment is less than or equal to
2716   // the stack alignment, ignore it. If the requested alignment is greater than
2717   // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
2718 unsigned StackAlign =
2719 TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
2720 if (Align <= StackAlign)
2723 // Round the size of the allocation up to the stack alignment size
2724   // by adding SA-1 to the size.
2725 AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2726 AllocSize.getValueType(), AllocSize,
2727 DAG.getIntPtrConstant(StackAlign-1));
2728 // Mask out the low bits for alignment purposes.
2729 AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
2730 AllocSize.getValueType(), AllocSize,
2731 DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
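  // For example, with StackAlign = 16 an AllocSize of 20 becomes (20 + 15) & ~15 = 32.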
2733 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
2734 const MVT *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(),
2736 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
2739 DAG.setRoot(DSA.getValue(1));
2741 // Inform the Frame Information that we have just allocated a variable-sized
2743 CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
2746 void SelectionDAGLowering::visitLoad(LoadInst &I) {
2747 const Value *SV = I.getOperand(0);
2748 SDValue Ptr = getValue(SV);
2750 const Type *Ty = I.getType();
2751 bool isVolatile = I.isVolatile();
2752 unsigned Alignment = I.getAlignment();
2754 SmallVector<MVT, 4> ValueVTs;
2755 SmallVector<uint64_t, 4> Offsets;
2756 ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
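  // For example (illustrative), loading a { i32, float } value produces two
  // component loads below, at byte offsets 0 and 4 from Ptr, later joined by a
  // TokenFactor.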
2757 unsigned NumValues = ValueVTs.size();
2762 bool ConstantMemory = false;
2764 // Serialize volatile loads with other side effects.
2766 else if (AA->pointsToConstantMemory(SV)) {
2767 // Do not serialize (non-volatile) loads of constant memory with anything.
2768 Root = DAG.getEntryNode();
2769 ConstantMemory = true;
2771 // Do not serialize non-volatile loads against each other.
2772 Root = DAG.getRoot();
2775 SmallVector<SDValue, 4> Values(NumValues);
2776 SmallVector<SDValue, 4> Chains(NumValues);
2777 MVT PtrVT = Ptr.getValueType();
2778 for (unsigned i = 0; i != NumValues; ++i) {
2779 SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
2780 DAG.getNode(ISD::ADD, getCurDebugLoc(),
2782 DAG.getConstant(Offsets[i], PtrVT)),
2784 isVolatile, Alignment);
2786 Chains[i] = L.getValue(1);
2789 if (!ConstantMemory) {
2790 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2792 &Chains[0], NumValues);
2796 PendingLoads.push_back(Chain);
2799 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2800 DAG.getVTList(&ValueVTs[0], NumValues),
2801 &Values[0], NumValues));
2805 void SelectionDAGLowering::visitStore(StoreInst &I) {
2806 Value *SrcV = I.getOperand(0);
2807 Value *PtrV = I.getOperand(1);
2809 SmallVector<MVT, 4> ValueVTs;
2810 SmallVector<uint64_t, 4> Offsets;
2811 ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
2812 unsigned NumValues = ValueVTs.size();
2816 // Get the lowered operands. Note that we do this after
2817   // checking if NumValues is zero, because with zero results
2818 // the operands won't have values in the map.
2819 SDValue Src = getValue(SrcV);
2820 SDValue Ptr = getValue(PtrV);
2822 SDValue Root = getRoot();
2823 SmallVector<SDValue, 4> Chains(NumValues);
2824 MVT PtrVT = Ptr.getValueType();
2825 bool isVolatile = I.isVolatile();
2826 unsigned Alignment = I.getAlignment();
2827 for (unsigned i = 0; i != NumValues; ++i)
2828 Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
2829 SDValue(Src.getNode(), Src.getResNo() + i),
2830 DAG.getNode(ISD::ADD, getCurDebugLoc(),
2832 DAG.getConstant(Offsets[i], PtrVT)),
2834 isVolatile, Alignment);
2836 DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2837 MVT::Other, &Chains[0], NumValues));
2840 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
2842 void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
2843 unsigned Intrinsic) {
2844 bool HasChain = !I.doesNotAccessMemory();
2845 bool OnlyLoad = HasChain && I.onlyReadsMemory();
2847 // Build the operand list.
2848 SmallVector<SDValue, 8> Ops;
2849 if (HasChain) { // If this intrinsic has side-effects, chainify it.
2851 // We don't need to serialize loads against other loads.
2852 Ops.push_back(DAG.getRoot());
2854 Ops.push_back(getRoot());
2858   // Info is set by getTgtMemIntrinsic
2859 TargetLowering::IntrinsicInfo Info;
2860 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
2862 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
2863 if (!IsTgtIntrinsic)
2864 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
2866 // Add all operands of the call to the operand list.
2867 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
2868 SDValue Op = getValue(I.getOperand(i));
2869 assert(TLI.isTypeLegal(Op.getValueType()) &&
2870 "Intrinsic uses a non-legal type?");
2874 std::vector<MVT> VTs;
2875 if (I.getType() != Type::VoidTy) {
2876 MVT VT = TLI.getValueType(I.getType());
2877 if (VT.isVector()) {
2878 const VectorType *DestTy = cast<VectorType>(I.getType());
2879 MVT EltVT = TLI.getValueType(DestTy->getElementType());
2881 VT = MVT::getVectorVT(EltVT, DestTy->getNumElements());
2882 assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
2885 assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
2889 VTs.push_back(MVT::Other);
2891 const MVT *VTList = DAG.getNodeValueTypes(VTs);
2895 if (IsTgtIntrinsic) {
2896     // This is a target intrinsic that touches memory
2897 Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
2899 &Ops[0], Ops.size(),
2900 Info.memVT, Info.ptrVal, Info.offset,
2901 Info.align, Info.vol,
2902 Info.readMem, Info.writeMem);
2905 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
2907 &Ops[0], Ops.size());
2908 else if (I.getType() != Type::VoidTy)
2909 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
2911 &Ops[0], Ops.size());
2913 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
2915 &Ops[0], Ops.size());
2918 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
2920 PendingLoads.push_back(Chain);
2924 if (I.getType() != Type::VoidTy) {
2925 if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
2926 MVT VT = TLI.getValueType(PTy);
2927 Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
2929 setValue(&I, Result);
2933 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
2934 static GlobalVariable *ExtractTypeInfo(Value *V) {
2935 V = V->stripPointerCasts();
2936 GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2937 assert ((GV || isa<ConstantPointerNull>(V)) &&
2938 "TypeInfo must be a global variable or NULL");
2944 /// AddCatchInfo - Extract the personality and type infos from an eh.selector
2945 /// call, and add them to the specified machine basic block.
2946 void AddCatchInfo(CallInst &I, MachineModuleInfo *MMI,
2947 MachineBasicBlock *MBB) {
2948 // Inform the MachineModuleInfo of the personality for this landing pad.
2949 ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
2950 assert(CE->getOpcode() == Instruction::BitCast &&
2951 isa<Function>(CE->getOperand(0)) &&
2952 "Personality should be a function");
2953 MMI->addPersonality(MBB, cast<Function>(CE->getOperand(0)));
2955 // Gather all the type infos for this landing pad and pass them along to
2956 // MachineModuleInfo.
2957 std::vector<GlobalVariable *> TyInfo;
2958 unsigned N = I.getNumOperands();
2960 for (unsigned i = N - 1; i > 2; --i) {
2961 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
2962 unsigned FilterLength = CI->getZExtValue();
2963 unsigned FirstCatch = i + FilterLength + !FilterLength;
2964 assert (FirstCatch <= N && "Invalid filter length");
2966 if (FirstCatch < N) {
2967 TyInfo.reserve(N - FirstCatch);
2968 for (unsigned j = FirstCatch; j < N; ++j)
2969 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
2970 MMI->addCatchTypeInfo(MBB, TyInfo);
2974 if (!FilterLength) {
2976 MMI->addCleanup(MBB);
2979 TyInfo.reserve(FilterLength - 1);
2980 for (unsigned j = i + 1; j < FirstCatch; ++j)
2981 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
2982 MMI->addFilterTypeInfo(MBB, TyInfo);
2991 TyInfo.reserve(N - 3);
2992 for (unsigned j = 3; j < N; ++j)
2993 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
2994 MMI->addCatchTypeInfo(MBB, TyInfo);
3000 /// GetSignificand - Get the significand and build it into a floating-point
3001 /// number with exponent of 1:
3003 /// Op = (Op & 0x007fffff) | 0x3f800000;
3005 /// where Op is the hexadecimal representation of the floating-point value.
3007 GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
3008 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3009 DAG.getConstant(0x007fffff, MVT::i32));
3010 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
3011 DAG.getConstant(0x3f800000, MVT::i32));
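  // For example, starting from the bits of 6.0f (0x40C00000), t2 is 0x3FC00000,
  // which bit-converts to the significand 1.5f.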
3012 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
3015 /// GetExponent - Get the exponent:
3017 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
3019 /// where Op is the hexadecimal representation of the floating-point value.
3021 GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
3023 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3024 DAG.getConstant(0x7f800000, MVT::i32));
3025 SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
3026 DAG.getConstant(23, TLI.getShiftAmountTy()));
3027 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
3028 DAG.getConstant(127, MVT::i32));
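  // For example, for the bits of 6.0f (0x40C00000) the biased exponent field is
  // 129, so t2 is 2 and the result is 2.0f.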
3029 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
3032 /// getF32Constant - Get a 32-bit floating-point constant.
3034 getF32Constant(SelectionDAG &DAG, unsigned Flt) {
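  // For example, getF32Constant(DAG, 0x3f800000) is 1.0f, and 0x3fb8aa3b is the
  // bit pattern of ~1.442695f (log2(e)) used in visitExp below.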
3035 return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
3038 /// Inlined utility function to implement binary input atomic intrinsics for
3039 /// visitIntrinsicCall: I is the call instruction, and
3040 /// Op is the associated NodeType for I.
3042 SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
3043 SDValue Root = getRoot();
3045 DAG.getAtomic(Op, getCurDebugLoc(),
3046 getValue(I.getOperand(2)).getValueType().getSimpleVT(),
3048 getValue(I.getOperand(1)),
3049 getValue(I.getOperand(2)),
3052 DAG.setRoot(L.getValue(1));
3056 // implVisitAluOverflow - Lower arithmetic overflow intrinsics.
3058 SelectionDAGLowering::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
3059 SDValue Op1 = getValue(I.getOperand(1));
3060 SDValue Op2 = getValue(I.getOperand(2));
3062 MVT ValueVTs[] = { Op1.getValueType(), MVT::i1 };
3063 SDValue Ops[] = { Op1, Op2 };
3065 SDValue Result = DAG.getNode(Op, getCurDebugLoc(),
3066 DAG.getVTList(&ValueVTs[0], 2), &Ops[0], 2);
3068 setValue(&I, Result);
3072 /// visitExp - Lower an exp intrinsic. Handles the special sequences for
3073 /// limited-precision mode.
3075 SelectionDAGLowering::visitExp(CallInst &I) {
3077 DebugLoc dl = getCurDebugLoc();
3079 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3080 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3081 SDValue Op = getValue(I.getOperand(1));
3083 // Put the exponent in the right bit position for later addition to the
3086 // #define LOG2OFe 1.4426950f
3087 // IntegerPartOfX = ((int32_t)(X * LOG2OFe));
3088 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3089 getF32Constant(DAG, 0x3fb8aa3b));
3090 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3092 // FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3093 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3094 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3096 // IntegerPartOfX <<= 23;
3097 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3098 DAG.getConstant(23, TLI.getShiftAmountTy()));
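    // Placing the integer part in the exponent field means that adding it (in the
    // integer domain) to the bits of 2^FractionalPartOfX multiplies the result by
    // 2^IntegerPartOfX; e.g. adding 3 << 23 to 0x3f800000 (1.0f) yields 0x41000000 (8.0f).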
3100 if (LimitFloatPrecision <= 6) {
3101 // For floating-point precision of 6:
3103 // TwoToFractionalPartOfX =
3105 // (0.735607626f + 0.252464424f * x) * x;
3107 // error 0.0144103317, which is 6 bits
3108 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3109 getF32Constant(DAG, 0x3e814304));
3110 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3111 getF32Constant(DAG, 0x3f3c50c8));
3112 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3113 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3114 getF32Constant(DAG, 0x3f7f5e7e));
3115 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5);
3117 // Add the exponent into the result in integer domain.
3118 SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3119 TwoToFracPartOfX, IntegerPartOfX);
3121 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
3122 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3123 // For floating-point precision of 12:
3125 // TwoToFractionalPartOfX =
3128 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3130 // 0.000107046256 error, which is 13 to 14 bits
3131 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3132 getF32Constant(DAG, 0x3da235e3));
3133 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3134 getF32Constant(DAG, 0x3e65b8f3));
3135 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3136 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3137 getF32Constant(DAG, 0x3f324b07));
3138 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3139 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3140 getF32Constant(DAG, 0x3f7ff8fd));
3141 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7);
3143 // Add the exponent into the result in integer domain.
3144 SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3145 TwoToFracPartOfX, IntegerPartOfX);
3147 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
3148 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3149 // For floating-point precision of 18:
3151 // TwoToFractionalPartOfX =
3155 // (0.554906021e-1f +
3156 // (0.961591928e-2f +
3157 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3159 // error 2.47208000*10^(-7), which is better than 18 bits
3160 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3161 getF32Constant(DAG, 0x3924b03e));
3162 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3163 getF32Constant(DAG, 0x3ab24b87));
3164 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3165 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3166 getF32Constant(DAG, 0x3c1d8c17));
3167 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3168 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3169 getF32Constant(DAG, 0x3d634a1d));
3170 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3171 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3172 getF32Constant(DAG, 0x3e75fe14));
3173 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3174 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3175 getF32Constant(DAG, 0x3f317234));
3176 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3177 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3178 getF32Constant(DAG, 0x3f800000));
3179 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
3182 // Add the exponent into the result in integer domain.
3183 SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3184 TwoToFracPartOfX, IntegerPartOfX);
3186 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
3189 // No special expansion.
3190 result = DAG.getNode(ISD::FEXP, dl,
3191 getValue(I.getOperand(1)).getValueType(),
3192 getValue(I.getOperand(1)));
3195 setValue(&I, result);
3198 /// visitLog - Lower a log intrinsic. Handles the special sequences for
3199 /// limited-precision mode.
3201 SelectionDAGLowering::visitLog(CallInst &I) {
3203 DebugLoc dl = getCurDebugLoc();
3205 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3206 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3207 SDValue Op = getValue(I.getOperand(1));
3208 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3210 // Scale the exponent by log(2) [0.69314718f].
3211 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3212 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3213 getF32Constant(DAG, 0x3f317218));
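    // Since X = Significand * 2^Exponent, log(X) = Exponent*log(2) + log(Significand);
    // e.g. log(6.0f) = 2*0.6931 + log(1.5) ~= 1.3863 + 0.4055 = 1.7918.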
3215 // Get the significand and build it into a floating-point number with
3217 SDValue X = GetSignificand(DAG, Op1, dl);
3219 if (LimitFloatPrecision <= 6) {
3220 // For floating-point precision of 6:
3224 // (1.4034025f - 0.23903021f * x) * x;
3226 // error 0.0034276066, which is better than 8 bits
3227 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3228 getF32Constant(DAG, 0xbe74c456));
3229 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3230 getF32Constant(DAG, 0x3fb3a2b1));
3231 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3232 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3233 getF32Constant(DAG, 0x3f949a29));
3235 result = DAG.getNode(ISD::FADD, dl,
3236 MVT::f32, LogOfExponent, LogOfMantissa);
3237 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3238 // For floating-point precision of 12:
3244 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
3246 // error 0.000061011436, which is 14 bits
3247 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3248 getF32Constant(DAG, 0xbd67b6d6));
3249 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3250 getF32Constant(DAG, 0x3ee4f4b8));
3251 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3252 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3253 getF32Constant(DAG, 0x3fbc278b));
3254 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3255 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3256 getF32Constant(DAG, 0x40348e95));
3257 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3258 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3259 getF32Constant(DAG, 0x3fdef31a));
3261 result = DAG.getNode(ISD::FADD, dl,
3262 MVT::f32, LogOfExponent, LogOfMantissa);
3263 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3264 // For floating-point precision of 18:
3272 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
3274 // error 0.0000023660568, which is better than 18 bits
3275 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3276 getF32Constant(DAG, 0xbc91e5ac));
3277 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3278 getF32Constant(DAG, 0x3e4350aa));
3279 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3280 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3281 getF32Constant(DAG, 0x3f60d3e3));
3282 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3283 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3284 getF32Constant(DAG, 0x4011cdf0));
3285 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3286 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3287 getF32Constant(DAG, 0x406cfd1c));
3288 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3289 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3290 getF32Constant(DAG, 0x408797cb));
3291 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3292 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3293 getF32Constant(DAG, 0x4006dcab));
3295 result = DAG.getNode(ISD::FADD, dl,
3296 MVT::f32, LogOfExponent, LogOfMantissa);
3299 // No special expansion.
3300 result = DAG.getNode(ISD::FLOG, dl,
3301 getValue(I.getOperand(1)).getValueType(),
3302 getValue(I.getOperand(1)));
3305 setValue(&I, result);
3308 /// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
3309 /// limited-precision mode.
3311 SelectionDAGLowering::visitLog2(CallInst &I) {
3313 DebugLoc dl = getCurDebugLoc();
3315 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3316 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3317 SDValue Op = getValue(I.getOperand(1));
3318 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3320 // Get the exponent.
3321 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
3323 // Get the significand and build it into a floating-point number in [1,2) (with the exponent forced to zero).
3325 SDValue X = GetSignificand(DAG, Op1, dl);
3327 // Different possible minimax approximations of significand in
3328 // floating-point for various degrees of accuracy over [1,2].
3329 if (LimitFloatPrecision <= 6) {
3330 // For floating-point precision of 6:
3332 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
3334 // error 0.0049451742, which is more than 7 bits
3335 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3336 getF32Constant(DAG, 0xbeb08fe0));
3337 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3338 getF32Constant(DAG, 0x40019463));
3339 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3340 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3341 getF32Constant(DAG, 0x3fd6633d));
3343 result = DAG.getNode(ISD::FADD, dl,
3344 MVT::f32, LogOfExponent, Log2ofMantissa);
3345 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3346 // For floating-point precision of 12:
3352 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
3354 // error 0.0000876136000, which is better than 13 bits
3355 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3356 getF32Constant(DAG, 0xbda7262e));
3357 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3358 getF32Constant(DAG, 0x3f25280b));
3359 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3360 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3361 getF32Constant(DAG, 0x4007b923));
3362 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3363 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3364 getF32Constant(DAG, 0x40823e2f));
3365 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3366 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3367 getF32Constant(DAG, 0x4020d29c));
3369 result = DAG.getNode(ISD::FADD, dl,
3370 MVT::f32, LogOfExponent, Log2ofMantissa);
3371 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3372 // For floating-point precision of 18:
3381 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
3383 // error 0.0000018516, which is better than 18 bits
3384 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3385 getF32Constant(DAG, 0xbcd2769e));
3386 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3387 getF32Constant(DAG, 0x3e8ce0b9));
3388 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3389 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3390 getF32Constant(DAG, 0x3fa22ae7));
3391 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3392 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3393 getF32Constant(DAG, 0x40525723));
3394 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3395 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3396 getF32Constant(DAG, 0x40aaf200));
3397 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3398 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3399 getF32Constant(DAG, 0x40c39dad));
3400 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3401 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3402 getF32Constant(DAG, 0x4042902c));
3404 result = DAG.getNode(ISD::FADD, dl,
3405 MVT::f32, LogOfExponent, Log2ofMantissa);
3408 // No special expansion.
3409 result = DAG.getNode(ISD::FLOG2, dl,
3410 getValue(I.getOperand(1)).getValueType(),
3411 getValue(I.getOperand(1)));
3414 setValue(&I, result);
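// Worked check of the expansion above (no exponent scaling is needed here,
// since log2(2^E) = E): for V = 8.0f the exponent is E = 3 and the
// significand is x = 1.0, so the 6-bit polynomial evaluates to
//   Log2ofMantissa = -1.6749035 + (2.0246817 - 0.34484768*1)*1 ~= 0.0049,
// giving ~3.005, i.e. log2(8) = 3 to within the stated 0.0049451742 error.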
3417 /// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
3418 /// limited-precision mode.
3420 SelectionDAGLowering::visitLog10(CallInst &I) {
3422 DebugLoc dl = getCurDebugLoc();
3424 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3425 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3426 SDValue Op = getValue(I.getOperand(1));
3427 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3429 // Scale the exponent by log10(2) [0.30102999f].
3430 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3431 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3432 getF32Constant(DAG, 0x3e9a209a));
3434 // Get the significand and build it into a floating-point number in [1,2) (with the exponent forced to zero).
3436 SDValue X = GetSignificand(DAG, Op1, dl);
3438 if (LimitFloatPrecision <= 6) {
3439 // For floating-point precision of 6:
3441 // Log10ofMantissa =
3443 // (0.60948995f - 0.10380950f * x) * x;
3445 // error 0.0014886165, which is 6 bits
3446 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3447 getF32Constant(DAG, 0xbdd49a13));
3448 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3449 getF32Constant(DAG, 0x3f1c0789));
3450 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3451 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3452 getF32Constant(DAG, 0x3f011300));
3454 result = DAG.getNode(ISD::FADD, dl,
3455 MVT::f32, LogOfExponent, Log10ofMantissa);
3456 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3457 // For floating-point precision of 12:
3459 // Log10ofMantissa =
3462 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
3464 // error 0.00019228036, which is better than 12 bits
3465 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3466 getF32Constant(DAG, 0x3d431f31));
3467 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3468 getF32Constant(DAG, 0x3ea21fb2));
3469 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3470 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3471 getF32Constant(DAG, 0x3f6ae232));
3472 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3473 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3474 getF32Constant(DAG, 0x3f25f7c3));
3476 result = DAG.getNode(ISD::FADD, dl,
3477 MVT::f32, LogOfExponent, Log10ofMantissa);
3478 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3479 // For floating-point precision of 18:
3481 // Log10ofMantissa =
3486 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
3488 // error 0.0000037995730, which is better than 18 bits
3489 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3490 getF32Constant(DAG, 0x3c5d51ce));
3491 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3492 getF32Constant(DAG, 0x3e00685a));
3493 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3494 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3495 getF32Constant(DAG, 0x3efb6798));
3496 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3497 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3498 getF32Constant(DAG, 0x3f88d192));
3499 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3500 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3501 getF32Constant(DAG, 0x3fc4316c));
3502 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3503 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
3504 getF32Constant(DAG, 0x3f57ce70));
3506 result = DAG.getNode(ISD::FADD, dl,
3507 MVT::f32, LogOfExponent, Log10ofMantissa);
3510 // No special expansion.
3511 result = DAG.getNode(ISD::FLOG10, dl,
3512 getValue(I.getOperand(1)).getValueType(),
3513 getValue(I.getOperand(1)));
3516 setValue(&I, result);
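// As above, this uses log10(V) = E*log10(2) + log10(m) with V = m * 2^E;
// 0x3e9a209a is the bit pattern of ~0.30103 = log10(2), and Log10ofMantissa
// is a minimax fit of log10 over [1,2). Worked check for V = 100.0f:
// 100 = 1.5625 * 2^6, so E*log10(2) ~= 1.80618, log10(1.5625) ~= 0.19382,
// and the sum is 2.0 as expected.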
3519 /// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
3520 /// limited-precision mode.
3522 SelectionDAGLowering::visitExp2(CallInst &I) {
3524 DebugLoc dl = getCurDebugLoc();
3526 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3527 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3528 SDValue Op = getValue(I.getOperand(1));
3530 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
3532 // FractionalPartOfX = x - (float)IntegerPartOfX;
3533 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3534 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
3536 // IntegerPartOfX <<= 23;
3537 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3538 DAG.getConstant(23, TLI.getShiftAmountTy()));
3540 if (LimitFloatPrecision <= 6) {
3541 // For floating-point precision of 6:
3543 // TwoToFractionalPartOfX =
3545 // (0.735607626f + 0.252464424f * x) * x;
3547 // error 0.0144103317, which is 6 bits
3548 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3549 getF32Constant(DAG, 0x3e814304));
3550 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3551 getF32Constant(DAG, 0x3f3c50c8));
3552 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3553 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3554 getF32Constant(DAG, 0x3f7f5e7e));
3555 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3556 SDValue TwoToFractionalPartOfX =
3557 DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3559 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3560 MVT::f32, TwoToFractionalPartOfX);
3561 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3562 // For floating-point precision of 12:
3564 // TwoToFractionalPartOfX =
3567 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3569 // error 0.000107046256, which is 13 to 14 bits
3570 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3571 getF32Constant(DAG, 0x3da235e3));
3572 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3573 getF32Constant(DAG, 0x3e65b8f3));
3574 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3575 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3576 getF32Constant(DAG, 0x3f324b07));
3577 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3578 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3579 getF32Constant(DAG, 0x3f7ff8fd));
3580 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3581 SDValue TwoToFractionalPartOfX =
3582 DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3584 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3585 MVT::f32, TwoToFractionalPartOfX);
3586 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3587 // For floating-point precision of 18:
3589 // TwoToFractionalPartOfX =
3593 // (0.554906021e-1f +
3594 // (0.961591928e-2f +
3595 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3596 // error 2.47208000*10^(-7), which is better than 18 bits
3597 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3598 getF32Constant(DAG, 0x3924b03e));
3599 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3600 getF32Constant(DAG, 0x3ab24b87));
3601 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3602 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3603 getF32Constant(DAG, 0x3c1d8c17));
3604 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3605 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3606 getF32Constant(DAG, 0x3d634a1d));
3607 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3608 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3609 getF32Constant(DAG, 0x3e75fe14));
3610 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3611 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3612 getF32Constant(DAG, 0x3f317234));
3613 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3614 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3615 getF32Constant(DAG, 0x3f800000));
3616 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3617 SDValue TwoToFractionalPartOfX =
3618 DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3620 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3621 MVT::f32, TwoToFractionalPartOfX);
3624 // No special expansion.
3625 result = DAG.getNode(ISD::FEXP2, dl,
3626 getValue(I.getOperand(1)).getValueType(),
3627 getValue(I.getOperand(1)));
3630 setValue(&I, result);
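// The key trick above: split x into an integer part N and a fractional part
// f, approximate 2^f with a polynomial whose value lands in [1,2), and then
// multiply by 2^N without an FP multiply -- integer-adding (N << 23) to the
// bit pattern of an IEEE-754 f32 bumps its exponent field by N.
//
// Purely illustrative host-side sketch of the 6-bit path (hypothetical
// helper, not part of the lowering; assumes the BitsToFloat/FloatToBits
// helpers from MathExtras.h):
#if 0
static float HostExp2Approx6(float V) {
  int N = int(V);                                    // FP_TO_SINT (trunc toward 0)
  float F = V - float(N);                            // fractional part
  float Poly = (BitsToFloat(0x3f3c50c8) +            // 0.735607626
                BitsToFloat(0x3e814304) * F) * F +   // 0.252464424
               BitsToFloat(0x3f7f5e7e);              // ~0.9975356
  // Scale by 2^N by adding N directly into the exponent field.
  return BitsToFloat(FloatToBits(Poly) + (uint32_t(N) << 23));
}
#endif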
3633 /// visitPow - Lower a pow intrinsic. Handles the special sequences for
3634 /// limited-precision mode with x == 10.0f.
3636 SelectionDAGLowering::visitPow(CallInst &I) {
3638 Value *Val = I.getOperand(1);
3639 DebugLoc dl = getCurDebugLoc();
3640 bool IsExp10 = false;
3642 if (getValue(Val).getValueType() == MVT::f32 &&
3643 getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
3644 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3645 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
3646 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3648 IsExp10 = CFP->getValueAPF().bitwiseIsEqual(Ten);
3653 if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3654 SDValue Op = getValue(I.getOperand(2));
3656 // Put the exponent in the right bit position for later addition to the final result:
3659 // #define LOG2OF10 3.3219281f
3660 // IntegerPartOfX = (int32_t)(x * LOG2OF10);
3661 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3662 getF32Constant(DAG, 0x40549a78));
3663 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3665 // FractionalPartOfX = x - (float)IntegerPartOfX;
3666 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3667 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3669 // IntegerPartOfX <<= 23;
3670 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3671 DAG.getConstant(23, TLI.getShiftAmountTy()));
3673 if (LimitFloatPrecision <= 6) {
3674 // For floating-point precision of 6:
3676 // twoToFractionalPartOfX =
3678 // (0.735607626f + 0.252464424f * x) * x;
3680 // error 0.0144103317, which is 6 bits
3681 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3682 getF32Constant(DAG, 0x3e814304));
3683 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3684 getF32Constant(DAG, 0x3f3c50c8));
3685 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3686 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3687 getF32Constant(DAG, 0x3f7f5e7e));
3688 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3689 SDValue TwoToFractionalPartOfX =
3690 DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3692 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3693 MVT::f32, TwoToFractionalPartOfX);
3694 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3695 // For floating-point precision of 12:
3697 // TwoToFractionalPartOfX =
3700 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3702 // error 0.000107046256, which is 13 to 14 bits
3703 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3704 getF32Constant(DAG, 0x3da235e3));
3705 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3706 getF32Constant(DAG, 0x3e65b8f3));
3707 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3708 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3709 getF32Constant(DAG, 0x3f324b07));
3710 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3711 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3712 getF32Constant(DAG, 0x3f7ff8fd));
3713 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3714 SDValue TwoToFractionalPartOfX =
3715 DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3717 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3718 MVT::f32, TwoToFractionalPartOfX);
3719 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3720 // For floating-point precision of 18:
3722 // TwoToFractionalPartOfX =
3726 // (0.554906021e-1f +
3727 // (0.961591928e-2f +
3728 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3729 // error 2.47208000*10^(-7), which is better than 18 bits
3730 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3731 getF32Constant(DAG, 0x3924b03e));
3732 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3733 getF32Constant(DAG, 0x3ab24b87));
3734 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3735 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3736 getF32Constant(DAG, 0x3c1d8c17));
3737 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3738 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3739 getF32Constant(DAG, 0x3d634a1d));
3740 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3741 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3742 getF32Constant(DAG, 0x3e75fe14));
3743 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3744 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3745 getF32Constant(DAG, 0x3f317234));
3746 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3747 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3748 getF32Constant(DAG, 0x3f800000));
3749 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3750 SDValue TwoToFractionalPartOfX =
3751 DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3753 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3754 MVT::f32, TwoToFractionalPartOfX);
3757 // No special expansion.
3758 result = DAG.getNode(ISD::FPOW, dl,
3759 getValue(I.getOperand(1)).getValueType(),
3760 getValue(I.getOperand(1)),
3761 getValue(I.getOperand(2)));
3764 setValue(&I, result);
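// The pow(10.0f, x) special case above is exp2 in disguise:
//   10^x = 2^(x * log2(10)),  with 0x40549a78 ~= 3.3219281 = log2(10),
// after which the same integer/fractional split and the same 2^f
// polynomials as in visitExp2 are reused. Worked check for x = 1:
// t0 = 3.3219281, so N = 3 and f = 0.3219281; 2^f = 1.25 and 1.25 * 2^3 = 10.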
3767 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
3768 /// we want to emit this as a call to a named external function, return the name;
3769 /// otherwise lower it and return null.
3771 SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
3772 DebugLoc dl = getCurDebugLoc();
3773 switch (Intrinsic) {
3775 // By default, turn this into a target intrinsic node.
3776 visitTargetIntrinsic(I, Intrinsic);
3778 case Intrinsic::vastart: visitVAStart(I); return 0;
3779 case Intrinsic::vaend: visitVAEnd(I); return 0;
3780 case Intrinsic::vacopy: visitVACopy(I); return 0;
3781 case Intrinsic::returnaddress:
3782 setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
3783 getValue(I.getOperand(1))));
3785 case Intrinsic::frameaddress:
3786 setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
3787 getValue(I.getOperand(1))));
3789 case Intrinsic::setjmp:
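// The "+ !uses..." below evaluates to 0 or 1; adding it to the string
// literal skips the leading '_' when the target does not use an
// underscore-prefixed setjmp. The longjmp case works the same way.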
3790 return "_setjmp"+!TLI.usesUnderscoreSetJmp();
3792 case Intrinsic::longjmp:
3793 return "_longjmp"+!TLI.usesUnderscoreLongJmp();
3795 case Intrinsic::memcpy: {
3796 SDValue Op1 = getValue(I.getOperand(1));
3797 SDValue Op2 = getValue(I.getOperand(2));
3798 SDValue Op3 = getValue(I.getOperand(3));
3799 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3800 DAG.setRoot(DAG.getMemcpy(getRoot(), Op1, Op2, Op3, Align, false,
3801 I.getOperand(1), 0, I.getOperand(2), 0));
3804 case Intrinsic::memset: {
3805 SDValue Op1 = getValue(I.getOperand(1));
3806 SDValue Op2 = getValue(I.getOperand(2));
3807 SDValue Op3 = getValue(I.getOperand(3));
3808 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3809 DAG.setRoot(DAG.getMemset(getRoot(), Op1, Op2, Op3, Align,
3810 I.getOperand(1), 0));
3813 case Intrinsic::memmove: {
3814 SDValue Op1 = getValue(I.getOperand(1));
3815 SDValue Op2 = getValue(I.getOperand(2));
3816 SDValue Op3 = getValue(I.getOperand(3));
3817 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3819 // If the source and destination are known to not be aliases, we can
3820 // lower memmove as memcpy.
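// For example, a memmove between two distinct allocas is proven NoAlias by
// the query below and takes the cheaper memcpy path; only a possibly
// overlapping copy falls through to the real memmove node.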
3821 uint64_t Size = -1ULL;
3822 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
3823 Size = C->getZExtValue();
3824 if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
3825 AliasAnalysis::NoAlias) {
3826 DAG.setRoot(DAG.getMemcpy(getRoot(), Op1, Op2, Op3, Align, false,
3827 I.getOperand(1), 0, I.getOperand(2), 0));
3831 DAG.setRoot(DAG.getMemmove(getRoot(), Op1, Op2, Op3, Align,
3832 I.getOperand(1), 0, I.getOperand(2), 0));
3835 case Intrinsic::dbg_stoppoint: {
3836 DwarfWriter *DW = DAG.getDwarfWriter();
3837 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
3838 if (DW && DW->ValidDebugInfo(SPI.getContext())) {
3839 DAG.setRoot(DAG.getDbgStopPoint(getRoot(),
3843 DICompileUnit CU(cast<GlobalVariable>(SPI.getContext()));
3844 unsigned SrcFile = DW->RecordSource(CU.getDirectory(), CU.getFilename());
3845 unsigned idx = DAG.getMachineFunction().
3846 getOrCreateDebugLocID(SrcFile,
3849 setCurDebugLoc(DebugLoc::get(idx));
3853 case Intrinsic::dbg_region_start: {
3854 DwarfWriter *DW = DAG.getDwarfWriter();
3855 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
3856 if (DW && DW->ValidDebugInfo(RSI.getContext())) {
3858 DW->RecordRegionStart(cast<GlobalVariable>(RSI.getContext()));
3859 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getRoot(), LabelID));
3864 case Intrinsic::dbg_region_end: {
3865 DwarfWriter *DW = DAG.getDwarfWriter();
3866 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
3867 if (DW && DW->ValidDebugInfo(REI.getContext())) {
3869 DW->RecordRegionEnd(cast<GlobalVariable>(REI.getContext()));
3870 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getRoot(), LabelID));
3875 case Intrinsic::dbg_func_start: {
3876 DwarfWriter *DW = DAG.getDwarfWriter();
3878 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
3879 Value *SP = FSI.getSubprogram();
3880 if (SP && DW->ValidDebugInfo(SP)) {
3881 // llvm.dbg.func.start implicitly defines a dbg_stoppoint which is
3882 // what (most?) gdb expects.
3883 DISubprogram Subprogram(cast<GlobalVariable>(SP));
3884 DICompileUnit CompileUnit = Subprogram.getCompileUnit();
3885 unsigned SrcFile = DW->RecordSource(CompileUnit.getDirectory(),
3886 CompileUnit.getFilename());
3887 // Record the source line, but do not create a label for the normal
3888 // function start. It will be emitted at asm emission time. However,
3889 // create a label if this is the beginning of an inlined function.
3890 unsigned Line = Subprogram.getLineNumber();
3892 DW->RecordSourceLine(Line, 0, SrcFile);
3893 if (DW->getRecordSourceLineCount() != 1)
3894 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getRoot(), LabelID));
3895 setCurDebugLoc(DebugLoc::get(DAG.getMachineFunction().
3896 getOrCreateDebugLocID(SrcFile, Line, 0)));
3901 case Intrinsic::dbg_declare: {
3902 DwarfWriter *DW = DAG.getDwarfWriter();
3903 DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
3904 Value *Variable = DI.getVariable();
3905 if (DW && DW->ValidDebugInfo(Variable))
3906 DAG.setRoot(DAG.getNode(ISD::DECLARE, dl, MVT::Other, getRoot(),
3907 getValue(DI.getAddress()), getValue(Variable)));
3911 case Intrinsic::eh_exception: {
3912 if (!CurMBB->isLandingPad()) {
3913 // FIXME: Mark exception register as live in. Hack for PR1508.
3914 unsigned Reg = TLI.getExceptionAddressRegister();
3915 if (Reg) CurMBB->addLiveIn(Reg);
3917 // Insert the EXCEPTIONADDR instruction.
3918 SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
3920 Ops[0] = DAG.getRoot();
3921 SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
3923 DAG.setRoot(Op.getValue(1));
3927 case Intrinsic::eh_selector_i32:
3928 case Intrinsic::eh_selector_i64: {
3929 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3930 MVT VT = (Intrinsic == Intrinsic::eh_selector_i32 ?
3931 MVT::i32 : MVT::i64);
3934 if (CurMBB->isLandingPad())
3935 AddCatchInfo(I, MMI, CurMBB);
3938 FuncInfo.CatchInfoLost.insert(&I);
3940 // FIXME: Mark exception selector register as live in. Hack for PR1508.
3941 unsigned Reg = TLI.getExceptionSelectorRegister();
3942 if (Reg) CurMBB->addLiveIn(Reg);
3945 // Insert the EHSELECTION instruction.
3946 SDVTList VTs = DAG.getVTList(VT, MVT::Other);
3948 Ops[0] = getValue(I.getOperand(1));
3950 SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
3952 DAG.setRoot(Op.getValue(1));
3954 setValue(&I, DAG.getConstant(0, VT));
3960 case Intrinsic::eh_typeid_for_i32:
3961 case Intrinsic::eh_typeid_for_i64: {
3962 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3963 MVT VT = (Intrinsic == Intrinsic::eh_typeid_for_i32 ?
3964 MVT::i32 : MVT::i64);
3967 // Find the type id for the given typeinfo.
3968 GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
3970 unsigned TypeID = MMI->getTypeIDFor(GV);
3971 setValue(&I, DAG.getConstant(TypeID, VT));
3973 // Return something different from what eh_selector returns.
3974 setValue(&I, DAG.getConstant(1, VT));
3980 case Intrinsic::eh_return_i32:
3981 case Intrinsic::eh_return_i64:
3982 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
3983 MMI->setCallsEHReturn(true);
3984 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
3987 getValue(I.getOperand(1)),
3988 getValue(I.getOperand(2))));
3990 setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
3994 case Intrinsic::eh_unwind_init:
3995 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
3996 MMI->setCallsUnwindInit(true);
4001 case Intrinsic::eh_dwarf_cfa: {
4002 MVT VT = getValue(I.getOperand(1)).getValueType();
4004 if (VT.bitsGT(TLI.getPointerTy()))
4005 CfaArg = DAG.getNode(ISD::TRUNCATE, dl,
4006 TLI.getPointerTy(), getValue(I.getOperand(1)));
4008 CfaArg = DAG.getNode(ISD::SIGN_EXTEND, dl,
4009 TLI.getPointerTy(), getValue(I.getOperand(1)));
4011 SDValue Offset = DAG.getNode(ISD::ADD, dl,
4013 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
4014 TLI.getPointerTy()),
4016 setValue(&I, DAG.getNode(ISD::ADD, dl,
4018 DAG.getNode(ISD::FRAMEADDR, dl,
4021 TLI.getPointerTy())),
4026 case Intrinsic::convertff:
4027 case Intrinsic::convertfsi:
4028 case Intrinsic::convertfui:
4029 case Intrinsic::convertsif:
4030 case Intrinsic::convertuif:
4031 case Intrinsic::convertss:
4032 case Intrinsic::convertsu:
4033 case Intrinsic::convertus:
4034 case Intrinsic::convertuu: {
4035 ISD::CvtCode Code = ISD::CVT_INVALID;
4036 switch (Intrinsic) {
4037 case Intrinsic::convertff: Code = ISD::CVT_FF; break;
4038 case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
4039 case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
4040 case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
4041 case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
4042 case Intrinsic::convertss: Code = ISD::CVT_SS; break;
4043 case Intrinsic::convertsu: Code = ISD::CVT_SU; break;
4044 case Intrinsic::convertus: Code = ISD::CVT_US; break;
4045 case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
4047 MVT DestVT = TLI.getValueType(I.getType());
4048 Value* Op1 = I.getOperand(1);
4049 setValue(&I, DAG.getConvertRndSat(DestVT, getValue(Op1),
4050 DAG.getValueType(DestVT),
4051 DAG.getValueType(getValue(Op1).getValueType()),
4052 getValue(I.getOperand(2)),
4053 getValue(I.getOperand(3)),
4058 case Intrinsic::sqrt:
4059 setValue(&I, DAG.getNode(ISD::FSQRT, dl,
4060 getValue(I.getOperand(1)).getValueType(),
4061 getValue(I.getOperand(1))));
4063 case Intrinsic::powi:
4064 setValue(&I, DAG.getNode(ISD::FPOWI, dl,
4065 getValue(I.getOperand(1)).getValueType(),
4066 getValue(I.getOperand(1)),
4067 getValue(I.getOperand(2))));
4069 case Intrinsic::sin:
4070 setValue(&I, DAG.getNode(ISD::FSIN, dl,
4071 getValue(I.getOperand(1)).getValueType(),
4072 getValue(I.getOperand(1))));
4074 case Intrinsic::cos:
4075 setValue(&I, DAG.getNode(ISD::FCOS, dl,
4076 getValue(I.getOperand(1)).getValueType(),
4077 getValue(I.getOperand(1))));
4079 case Intrinsic::log:
4082 case Intrinsic::log2:
4085 case Intrinsic::log10:
4088 case Intrinsic::exp:
4091 case Intrinsic::exp2:
4094 case Intrinsic::pow:
4097 case Intrinsic::pcmarker: {
4098 SDValue Tmp = getValue(I.getOperand(1));
4099 DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
4102 case Intrinsic::readcyclecounter: {
4103 SDValue Op = getRoot();
4104 SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER, dl,
4105 DAG.getNodeValueTypes(MVT::i64, MVT::Other), 2,
4108 DAG.setRoot(Tmp.getValue(1));
4111 case Intrinsic::part_select: {
4112 // Currently not implemented: just abort
4113 assert(0 && "part_select intrinsic not implemented");
4116 case Intrinsic::part_set: {
4117 // Currently not implemented: just abort
4118 assert(0 && "part_set intrinsic not implemented");
4121 case Intrinsic::bswap:
4122 setValue(&I, DAG.getNode(ISD::BSWAP, dl,
4123 getValue(I.getOperand(1)).getValueType(),
4124 getValue(I.getOperand(1))));
4126 case Intrinsic::cttz: {
4127 SDValue Arg = getValue(I.getOperand(1));
4128 MVT Ty = Arg.getValueType();
4129 SDValue result = DAG.getNode(ISD::CTTZ, dl, Ty, Arg);
4130 setValue(&I, result);
4133 case Intrinsic::ctlz: {
4134 SDValue Arg = getValue(I.getOperand(1));
4135 MVT Ty = Arg.getValueType();
4136 SDValue result = DAG.getNode(ISD::CTLZ, dl, Ty, Arg);
4137 setValue(&I, result);
4140 case Intrinsic::ctpop: {
4141 SDValue Arg = getValue(I.getOperand(1));
4142 MVT Ty = Arg.getValueType();
4143 SDValue result = DAG.getNode(ISD::CTPOP, dl, Ty, Arg);
4144 setValue(&I, result);
4147 case Intrinsic::stacksave: {
4148 SDValue Op = getRoot();
4149 SDValue Tmp = DAG.getNode(ISD::STACKSAVE, dl,
4150 DAG.getNodeValueTypes(TLI.getPointerTy(), MVT::Other), 2, &Op, 1);
4152 DAG.setRoot(Tmp.getValue(1));
4155 case Intrinsic::stackrestore: {
4156 SDValue Tmp = getValue(I.getOperand(1));
4157 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Tmp));
4160 case Intrinsic::stackprotector: {
4161 // Emit code into the DAG to store the stack guard onto the stack.
4162 MachineFunction &MF = DAG.getMachineFunction();
4163 MachineFrameInfo *MFI = MF.getFrameInfo();
4164 MVT PtrTy = TLI.getPointerTy();
4166 SDValue Src = getValue(I.getOperand(1)); // The guard's value.
4167 AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
4169 int FI = FuncInfo.StaticAllocaMap[Slot];
4170 MFI->setStackProtectorIndex(FI);
4172 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4174 // Store the stack protector onto the stack.
4175 SDValue Result = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
4176 PseudoSourceValue::getFixedStack(FI),
4178 setValue(&I, Result);
4179 DAG.setRoot(Result);
4182 case Intrinsic::var_annotation:
4183 // Discard annotate attributes
4186 case Intrinsic::init_trampoline: {
4187 const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
4191 Ops[1] = getValue(I.getOperand(1));
4192 Ops[2] = getValue(I.getOperand(2));
4193 Ops[3] = getValue(I.getOperand(3));
4194 Ops[4] = DAG.getSrcValue(I.getOperand(1));
4195 Ops[5] = DAG.getSrcValue(F);
4197 SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE, dl,
4198 DAG.getNodeValueTypes(TLI.getPointerTy(),
4203 DAG.setRoot(Tmp.getValue(1));
4207 case Intrinsic::gcroot:
4209 Value *Alloca = I.getOperand(1);
4210 Constant *TypeMap = cast<Constant>(I.getOperand(2));
4212 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
4213 GFI->addStackRoot(FI->getIndex(), TypeMap);
4217 case Intrinsic::gcread:
4218 case Intrinsic::gcwrite:
4219 assert(0 && "GC failed to lower gcread/gcwrite intrinsics!");
4222 case Intrinsic::flt_rounds: {
4223 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32));
4227 case Intrinsic::trap: {
4228 DAG.setRoot(DAG.getNode(ISD::TRAP, dl,MVT::Other, getRoot()));
4232 case Intrinsic::uadd_with_overflow:
4233 return implVisitAluOverflow(I, ISD::UADDO);
4234 case Intrinsic::sadd_with_overflow:
4235 return implVisitAluOverflow(I, ISD::SADDO);
4236 case Intrinsic::usub_with_overflow:
4237 return implVisitAluOverflow(I, ISD::USUBO);
4238 case Intrinsic::ssub_with_overflow:
4239 return implVisitAluOverflow(I, ISD::SSUBO);
4240 case Intrinsic::umul_with_overflow:
4241 return implVisitAluOverflow(I, ISD::UMULO);
4242 case Intrinsic::smul_with_overflow:
4243 return implVisitAluOverflow(I, ISD::SMULO);
4245 case Intrinsic::prefetch: {
4248 Ops[1] = getValue(I.getOperand(1));
4249 Ops[2] = getValue(I.getOperand(2));
4250 Ops[3] = getValue(I.getOperand(3));
4251 DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
4255 case Intrinsic::memory_barrier: {
4258 for (int x = 1; x < 6; ++x)
4259 Ops[x] = getValue(I.getOperand(x));
4261 DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
4264 case Intrinsic::atomic_cmp_swap: {
4265 SDValue Root = getRoot();
4267 DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
4268 getValue(I.getOperand(2)).getValueType().getSimpleVT(),
4270 getValue(I.getOperand(1)),
4271 getValue(I.getOperand(2)),
4272 getValue(I.getOperand(3)),
4275 DAG.setRoot(L.getValue(1));
4278 case Intrinsic::atomic_load_add:
4279 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
4280 case Intrinsic::atomic_load_sub:
4281 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
4282 case Intrinsic::atomic_load_or:
4283 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
4284 case Intrinsic::atomic_load_xor:
4285 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
4286 case Intrinsic::atomic_load_and:
4287 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
4288 case Intrinsic::atomic_load_nand:
4289 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
4290 case Intrinsic::atomic_load_max:
4291 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
4292 case Intrinsic::atomic_load_min:
4293 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
4294 case Intrinsic::atomic_load_umin:
4295 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
4296 case Intrinsic::atomic_load_umax:
4297 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
4298 case Intrinsic::atomic_swap:
4299 return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);
4304 void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
4306 MachineBasicBlock *LandingPad) {
4307 const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
4308 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
4309 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4310 unsigned BeginLabel = 0, EndLabel = 0;
4312 TargetLowering::ArgListTy Args;
4313 TargetLowering::ArgListEntry Entry;
4314 Args.reserve(CS.arg_size());
4315 for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
4317 SDValue ArgNode = getValue(*i);
4318 Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
4320 unsigned attrInd = i - CS.arg_begin() + 1;
4321 Entry.isSExt = CS.paramHasAttr(attrInd, Attribute::SExt);
4322 Entry.isZExt = CS.paramHasAttr(attrInd, Attribute::ZExt);
4323 Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
4324 Entry.isSRet = CS.paramHasAttr(attrInd, Attribute::StructRet);
4325 Entry.isNest = CS.paramHasAttr(attrInd, Attribute::Nest);
4326 Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
4327 Entry.Alignment = CS.getParamAlignment(attrInd);
4328 Args.push_back(Entry);
4331 if (LandingPad && MMI) {
4332 // Insert a label before the invoke call to mark the try range. This can be
4333 // used to detect deletion of the invoke via the MachineModuleInfo.
4334 BeginLabel = MMI->NextLabelID();
4335 // Both PendingLoads and PendingExports must be flushed here;
4336 // this call might not return.
4338 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getControlRoot(), BeginLabel));
4341 std::pair<SDValue,SDValue> Result =
4342 TLI.LowerCallTo(getRoot(), CS.getType(),
4343 CS.paramHasAttr(0, Attribute::SExt),
4344 CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
4345 CS.paramHasAttr(0, Attribute::InReg),
4346 CS.getCallingConv(),
4347 IsTailCall && PerformTailCallOpt,
4348 Callee, Args, DAG, getCurDebugLoc());
4349 if (CS.getType() != Type::VoidTy)
4350 setValue(CS.getInstruction(), Result.first);
4351 DAG.setRoot(Result.second);
4353 if (LandingPad && MMI) {
4354 // Insert a label at the end of the invoke call to mark the try range. This
4355 // can be used to detect deletion of the invoke via the MachineModuleInfo.
4356 EndLabel = MMI->NextLabelID();
4357 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getRoot(), EndLabel));
4359 // Inform MachineModuleInfo of range.
4360 MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
4365 void SelectionDAGLowering::visitCall(CallInst &I) {
4366 const char *RenameFn = 0;
4367 if (Function *F = I.getCalledFunction()) {
4368 if (F->isDeclaration()) {
4369 if (unsigned IID = F->getIntrinsicID()) {
4370 RenameFn = visitIntrinsicCall(I, IID);
4376 // Check for well-known libc/libm calls. If the function is internal, it
4377 // can't be a library call.
4378 unsigned NameLen = F->getNameLen();
4379 if (!F->hasLocalLinkage() && NameLen) {
4380 const char *NameStr = F->getNameStart();
4381 if (NameStr[0] == 'c' &&
4382 ((NameLen == 8 && !strcmp(NameStr, "copysign")) ||
4383 (NameLen == 9 && !strcmp(NameStr, "copysignf")))) {
4384 if (I.getNumOperands() == 3 && // Basic sanity checks.
4385 I.getOperand(1)->getType()->isFloatingPoint() &&
4386 I.getType() == I.getOperand(1)->getType() &&
4387 I.getType() == I.getOperand(2)->getType()) {
4388 SDValue LHS = getValue(I.getOperand(1));
4389 SDValue RHS = getValue(I.getOperand(2));
4390 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
4391 LHS.getValueType(), LHS, RHS));
4394 } else if (NameStr[0] == 'f' &&
4395 ((NameLen == 4 && !strcmp(NameStr, "fabs")) ||
4396 (NameLen == 5 && !strcmp(NameStr, "fabsf")) ||
4397 (NameLen == 5 && !strcmp(NameStr, "fabsl")))) {
4398 if (I.getNumOperands() == 2 && // Basic sanity checks.
4399 I.getOperand(1)->getType()->isFloatingPoint() &&
4400 I.getType() == I.getOperand(1)->getType()) {
4401 SDValue Tmp = getValue(I.getOperand(1));
4402 setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
4403 Tmp.getValueType(), Tmp));
4406 } else if (NameStr[0] == 's' &&
4407 ((NameLen == 3 && !strcmp(NameStr, "sin")) ||
4408 (NameLen == 4 && !strcmp(NameStr, "sinf")) ||
4409 (NameLen == 4 && !strcmp(NameStr, "sinl")))) {
4410 if (I.getNumOperands() == 2 && // Basic sanity checks.
4411 I.getOperand(1)->getType()->isFloatingPoint() &&
4412 I.getType() == I.getOperand(1)->getType()) {
4413 SDValue Tmp = getValue(I.getOperand(1));
4414 setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
4415 Tmp.getValueType(), Tmp));
4418 } else if (NameStr[0] == 'c' &&
4419 ((NameLen == 3 && !strcmp(NameStr, "cos")) ||
4420 (NameLen == 4 && !strcmp(NameStr, "cosf")) ||
4421 (NameLen == 4 && !strcmp(NameStr, "cosl")))) {
4422 if (I.getNumOperands() == 2 && // Basic sanity checks.
4423 I.getOperand(1)->getType()->isFloatingPoint() &&
4424 I.getType() == I.getOperand(1)->getType()) {
4425 SDValue Tmp = getValue(I.getOperand(1));
4426 setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
4427 Tmp.getValueType(), Tmp));
4432 } else if (isa<InlineAsm>(I.getOperand(0))) {
4439 Callee = getValue(I.getOperand(0));
4441 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
4443 LowerCallTo(&I, Callee, I.isTailCall());
4447 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copy from
4448 /// this value and return the result as a ValueVT value. This uses
4449 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4450 /// If the Flag pointer is NULL, no flag is used.
4451 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
4453 SDValue *Flag) const {
4454 // Assemble the legal parts into the final values.
4455 SmallVector<SDValue, 4> Values(ValueVTs.size());
4456 SmallVector<SDValue, 8> Parts;
4457 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4458 // Copy the legal parts from the registers.
4459 MVT ValueVT = ValueVTs[Value];
4460 unsigned NumRegs = TLI->getNumRegisters(ValueVT);
4461 MVT RegisterVT = RegVTs[Value];
4463 Parts.resize(NumRegs);
4464 for (unsigned i = 0; i != NumRegs; ++i) {
4467 P = DAG.getCopyFromReg(Chain, Regs[Part+i], RegisterVT);
4469 P = DAG.getCopyFromReg(Chain, Regs[Part+i], RegisterVT, *Flag);
4470 *Flag = P.getValue(2);
4472 Chain = P.getValue(1);
4474 // If the source register was virtual and if we know something about it,
4475 // add an assert node.
4476 if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
4477 RegisterVT.isInteger() && !RegisterVT.isVector()) {
4478 unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
4479 FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
4480 if (FLI.LiveOutRegInfo.size() > SlotNo) {
4481 FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
4483 unsigned RegSize = RegisterVT.getSizeInBits();
4484 unsigned NumSignBits = LOI.NumSignBits;
4485 unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
4487 // FIXME: We capture more information than the dag can represent. For
4488 // now, just use the tightest assertzext/assertsext possible.
4490 MVT FromVT(MVT::Other);
4491 if (NumSignBits == RegSize)
4492 isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
4493 else if (NumZeroBits >= RegSize-1)
4494 isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
4495 else if (NumSignBits > RegSize-8)
4496 isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
4497 else if (NumZeroBits >= RegSize-9)
4498 isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
4499 else if (NumSignBits > RegSize-16)
4500 isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
4501 else if (NumZeroBits >= RegSize-17)
4502 isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
4503 else if (NumSignBits > RegSize-32)
4504 isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
4505 else if (NumZeroBits >= RegSize-33)
4506 isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
4508 if (FromVT != MVT::Other) {
4509 P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
4510 RegisterVT, P, DAG.getValueType(FromVT));
4519 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
4520 NumRegs, RegisterVT, ValueVT);
4525 return DAG.getNode(ISD::MERGE_VALUES, dl,
4526 DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
4527 &Values[0], ValueVTs.size());
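// The AssertSext/AssertZext nodes attached above let later DAG combines
// exploit what is known about a virtual register without redoing the
// analysis. For example, if LiveOutRegInfo shows an i32 vreg only ever
// holds 0 or 1 (31 known leading zero bits), the copy is wrapped in an
// AssertZext from i1, and a subsequent (and X, 1) or zero extension of it
// can be folded away.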
4530 /// getCopyToRegs - Emit a series of CopyToReg nodes that copy the
4531 /// specified value into the registers specified by this object. This uses
4532 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4533 /// If the Flag pointer is NULL, no flag is used.
4534 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
4535 SDValue &Chain, SDValue *Flag) const {
4536 // Get the list of the value's legal parts.
4537 unsigned NumRegs = Regs.size();
4538 SmallVector<SDValue, 8> Parts(NumRegs);
4539 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4540 MVT ValueVT = ValueVTs[Value];
4541 unsigned NumParts = TLI->getNumRegisters(ValueVT);
4542 MVT RegisterVT = RegVTs[Value];
4544 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
4545 &Parts[Part], NumParts, RegisterVT);
4549 // Copy the parts into the registers.
4550 SmallVector<SDValue, 8> Chains(NumRegs);
4551 for (unsigned i = 0; i != NumRegs; ++i) {
4554 Part = DAG.getCopyToReg(Chain, Regs[i], Parts[i]);
4556 Part = DAG.getCopyToReg(Chain, Regs[i], Parts[i], *Flag);
4557 *Flag = Part.getValue(1);
4559 Chains[i] = Part.getValue(0);
4562 if (NumRegs == 1 || Flag)
4563 // If NumRegs > 1 and a Flag is used, then the use of the last CopyToReg is
4564 // flagged to it. That is, the CopyToReg nodes and the user are considered
4565 // a single scheduling unit. If we create a TokenFactor and return it as
4566 // chain, then the TokenFactor is both a predecessor (operand) of the
4567 // user as well as a successor (the TF operands are flagged to the user).
4568 // c1, f1 = CopyToReg
4569 // c2, f2 = CopyToReg
4570 // c3 = TokenFactor c1, c2
4573 Chain = Chains[NumRegs-1];
4575 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
4578 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
4579 /// operand list. This adds the code marker and includes the number of
4580 /// values added into it.
4581 void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
4582 std::vector<SDValue> &Ops) const {
4583 MVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
4584 Ops.push_back(DAG.getTargetConstant(Code | (Regs.size() << 3), IntPtrTy));
4585 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
4586 unsigned NumRegs = TLI->getNumRegisters(ValueVTs[Value]);
4587 MVT RegisterVT = RegVTs[Value];
4588 for (unsigned i = 0; i != NumRegs; ++i) {
4589 assert(Reg < Regs.size() && "Mismatch in # registers expected");
4590 Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
4595 /// isAllocatableRegister - If the specified register is safe to allocate,
4596 /// i.e. it isn't a stack pointer or some other special register, return the
4597 /// register class for the register. Otherwise, return null.
4598 static const TargetRegisterClass *
4599 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
4600 const TargetLowering &TLI,
4601 const TargetRegisterInfo *TRI) {
4602 MVT FoundVT = MVT::Other;
4603 const TargetRegisterClass *FoundRC = 0;
4604 for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
4605 E = TRI->regclass_end(); RCI != E; ++RCI) {
4606 MVT ThisVT = MVT::Other;
4608 const TargetRegisterClass *RC = *RCI;
4609 // If none of the value types for this register class are valid, we
4610 // can't use it. For example, 64-bit reg classes on 32-bit targets.
4611 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
4613 if (TLI.isTypeLegal(*I)) {
4614 // If we have already found this register in a different register class,
4615 // choose the one with the largest VT specified. For example, on
4616 // PowerPC, we favor f64 register classes over f32.
4617 if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
4624 if (ThisVT == MVT::Other) continue;
4626 // NOTE: This isn't ideal. In particular, this might allocate the
4627 // frame pointer in functions that need it (due to them not being taken
4628 // out of allocation, because a variable sized allocation hasn't been seen
4629 // yet). This is a slight code pessimization, but should still work.
4630 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
4631 E = RC->allocation_order_end(MF); I != E; ++I)
4633 // We found a matching register class. Keep looking at others in case
4634 // we find one with larger registers that this physreg is also in.
4645 /// AsmOperandInfo - This contains information for each constraint that we are lowering.
4647 struct VISIBILITY_HIDDEN SDISelAsmOperandInfo :
4648 public TargetLowering::AsmOperandInfo {
4649 /// CallOperand - If this is the result output operand or a clobber
4650 /// this is null, otherwise it is the incoming operand to the CallInst.
4651 /// This gets modified as the asm is processed.
4652 SDValue CallOperand;
4654 /// AssignedRegs - If this is a register or register class operand, this
4655 /// contains the set of registers corresponding to the operand.
4656 RegsForValue AssignedRegs;
4658 explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
4659 : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
4662 /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
4663 /// busy in OutputRegs/InputRegs.
4664 void MarkAllocatedRegs(bool isOutReg, bool isInReg,
4665 std::set<unsigned> &OutputRegs,
4666 std::set<unsigned> &InputRegs,
4667 const TargetRegisterInfo &TRI) const {
4669 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4670 MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
4673 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4674 MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
4678 /// getCallOperandValMVT - Return the MVT of the Value* that this operand
4679 /// corresponds to. If there is no Value* for this operand, it returns MVT::Other.
4681 MVT getCallOperandValMVT(const TargetLowering &TLI,
4682 const TargetData *TD) const {
4683 if (CallOperandVal == 0) return MVT::Other;
4685 if (isa<BasicBlock>(CallOperandVal))
4686 return TLI.getPointerTy();
4688 const llvm::Type *OpTy = CallOperandVal->getType();
4690 // If this is an indirect operand, the operand is a pointer to the
4693 OpTy = cast<PointerType>(OpTy)->getElementType();
4695 // If OpTy is not a single value, it may be a struct/union that we
4696 // can tile with integers.
4697 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
4698 unsigned BitSize = TD->getTypeSizeInBits(OpTy);
4707 OpTy = IntegerType::get(BitSize);
4712 return TLI.getValueType(OpTy, true);
4716 /// MarkRegAndAliases - Mark the specified register and all of its aliases in the specified set.
4718 static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
4719 const TargetRegisterInfo &TRI) {
4720 assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
4722 if (const unsigned *Aliases = TRI.getAliasSet(Reg))
4723 for (; *Aliases; ++Aliases)
4724 Regs.insert(*Aliases);
4727 } // end llvm namespace.
4730 /// GetRegistersForValue - Assign registers (virtual or physical) for the
4731 /// specified operand. We prefer to assign virtual registers, to allow the
4732 /// register allocator to handle the assignment process. However, if the asm uses
4733 /// features that we can't model on machineinstrs, we have SDISel do the
4734 /// allocation. This produces generally horrible, but correct, code.
4736 /// OpInfo describes the operand.
4737 /// Input and OutputRegs are the set of already allocated physical registers.
4739 void SelectionDAGLowering::
4740 GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
4741 std::set<unsigned> &OutputRegs,
4742 std::set<unsigned> &InputRegs) {
4743 // Compute whether this value requires an input register, an output register,
4745 bool isOutReg = false;
4746 bool isInReg = false;
4747 switch (OpInfo.Type) {
4748 case InlineAsm::isOutput:
4751 // If there is an input constraint that matches this, we need to reserve
4752 // the input register so no other inputs allocate to it.
4753 isInReg = OpInfo.hasMatchingInput();
4755 case InlineAsm::isInput:
4759 case InlineAsm::isClobber:
4766 MachineFunction &MF = DAG.getMachineFunction();
4767 SmallVector<unsigned, 4> Regs;
4769 // If this is a constraint for a single physreg, or a constraint for a
4770 // register class, find it.
4771 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
4772 TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
4773 OpInfo.ConstraintVT);
4775 unsigned NumRegs = 1;
4776 if (OpInfo.ConstraintVT != MVT::Other) {
4777 // If this is an FP input in an integer register (or vice versa), insert a bit
4778 // cast of the input value. More generally, handle any case where the input
4779 // value disagrees with the register class we plan to stick this in.
4780 if (OpInfo.Type == InlineAsm::isInput &&
4781 PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
4782 // Try to convert to the first MVT that the reg class contains. If the
4783 // types are identical size, use a bitcast to convert (e.g. two differing
4785 MVT RegVT = *PhysReg.second->vt_begin();
4786 if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
4787 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4788 RegVT, OpInfo.CallOperand);
4789 OpInfo.ConstraintVT = RegVT;
4790 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
4791 // If the input is a FP value and we want it in FP registers, do a
4792 // bitcast to the corresponding integer type. This turns an f64 value
4793 // into i64, which can be passed with two i32 values on a 32-bit
4795 RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
4796 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4797 RegVT, OpInfo.CallOperand);
4798 OpInfo.ConstraintVT = RegVT;
4802 NumRegs = TLI.getNumRegisters(OpInfo.ConstraintVT);
4806 MVT ValueVT = OpInfo.ConstraintVT;
4808 // If this is a constraint for a specific physical register, like {r17},
4810 if (PhysReg.first) {
4811 if (OpInfo.ConstraintVT == MVT::Other)
4812 ValueVT = *PhysReg.second->vt_begin();
4814 // Get the actual register value type. This is important, because the user
4815 // may have asked for (e.g.) the AX register in i32 type. We need to
4816 // remember that AX is actually i16 to get the right extension.
4817 RegVT = *PhysReg.second->vt_begin();
4819 // This is an explicit reference to a physical register.
4820 Regs.push_back(PhysReg.first);
4822 // If this is an expanded reference, add the rest of the regs to Regs.
4824 TargetRegisterClass::iterator I = PhysReg.second->begin();
4825 for (; *I != PhysReg.first; ++I)
4826 assert(I != PhysReg.second->end() && "Didn't find reg!");
4828 // Already added the first reg.
4830 for (; NumRegs; --NumRegs, ++I) {
4831 assert(I != PhysReg.second->end() && "Ran out of registers to allocate!");
4835 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
4836 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
4837 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
4841 // Otherwise, if this was a reference to an LLVM register class, create vregs
4842 // for this reference.
4843 std::vector<unsigned> RegClassRegs;
4844 const TargetRegisterClass *RC = PhysReg.second;
4846 // If this is a tied register, our regalloc doesn't know how to maintain
4847 // the constraint, so we have to pick a register to pin the input/output to.
4848 // If it isn't a matched constraint, go ahead and create a vreg and let the
4849 // regalloc do its thing.
4850 if (!OpInfo.hasMatchingInput()) {
4851 RegVT = *PhysReg.second->vt_begin();
4852 if (OpInfo.ConstraintVT == MVT::Other)
4855 // Create the appropriate number of virtual registers.
4856 MachineRegisterInfo &RegInfo = MF.getRegInfo();
4857 for (; NumRegs; --NumRegs)
4858 Regs.push_back(RegInfo.createVirtualRegister(PhysReg.second));
4860 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
4864 // Otherwise, we can't allocate it. Let the code below figure out how to
4865 // maintain these constraints.
4866 RegClassRegs.assign(PhysReg.second->begin(), PhysReg.second->end());
4869 // This is a reference to a register class that doesn't directly correspond
4870 // to an LLVM register class. Allocate NumRegs consecutive, available,
4871 // registers from the class.
4872 RegClassRegs = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
4873 OpInfo.ConstraintVT);
4876 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
4877 unsigned NumAllocated = 0;
4878 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
4879 unsigned Reg = RegClassRegs[i];
4880 // See if this register is available.
4881 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
4882 (isInReg && InputRegs.count(Reg))) { // Already used.
4883 // Make sure we find consecutive registers.
4888 // Check to see if this register is allocatable (i.e. don't give out the stack pointer).
4891 RC = isAllocatableRegister(Reg, MF, TLI, TRI);
4892 if (!RC) { // Couldn't allocate this register.
4893 // Reset NumAllocated to make sure we return consecutive registers.
4899 // Okay, this register is good, we can use it.
4902 // If we allocated enough consecutive registers, succeed.
4903 if (NumAllocated == NumRegs) {
4904 unsigned RegStart = (i-NumAllocated)+1;
4905 unsigned RegEnd = i+1;
4906 // Mark all of the allocated registers used.
4907 for (unsigned i = RegStart; i != RegEnd; ++i)
4908 Regs.push_back(RegClassRegs[i]);
4910 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
4911 OpInfo.ConstraintVT);
4912 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
4917 // Otherwise, we couldn't allocate enough registers for this.
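// In other words, for a constraint that maps to a generic register-class
// code (via getRegClassForInlineAsmConstraint), the loop above does a simple
// linear scan: walk the candidate registers in order, skip any that are
// already claimed by another operand or are unallocatable, restart the count
// whenever the run is broken, and succeed only once NumRegs consecutive free
// registers have been found.
//
// Illustrative sketch of that scan in isolation (hypothetical helper, not
// used by the lowering):
#if 0
static int FindConsecutiveRegs(const std::vector<unsigned> &Cands,
                               unsigned NumRegs,
                               bool (*Available)(unsigned Reg)) {
  unsigned Run = 0;
  for (unsigned i = 0, e = Cands.size(); i != e; ++i) {
    if (!Available(Cands[i])) { Run = 0; continue; }  // broken run: restart
    if (++Run == NumRegs) return int(i - NumRegs + 1);
  }
  return -1;  // no run of NumRegs consecutive free registers
}
#endif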
4920 /// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
4921 /// processed uses a memory 'm' constraint.
4923 hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
4924 const TargetLowering &TLI) {
4925 for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
4926 InlineAsm::ConstraintInfo &CI = CInfos[i];
4927 for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
4928 TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
4929 if (CType == TargetLowering::C_Memory)
4937 /// visitInlineAsm - Handle a call to an InlineAsm object.
4939 void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
4940 InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
4942 /// ConstraintOperands - Information about all of the constraints.
4943 std::vector<SDISelAsmOperandInfo> ConstraintOperands;
4945 SDValue Chain = getRoot();
4948 std::set<unsigned> OutputRegs, InputRegs;
4950 // Do a prepass over the constraints, canonicalizing them, and building up the
4951 // ConstraintOperands list.
4952 std::vector<InlineAsm::ConstraintInfo>
4953 ConstraintInfos = IA->ParseConstraints();
4955 bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
4957 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
4958 unsigned ResNo = 0; // ResNo - The result number of the next output.
4959 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
4960 ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
4961 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
4963 MVT OpVT = MVT::Other;
4965 // Compute the value type for each operand.
4966 switch (OpInfo.Type) {
4967 case InlineAsm::isOutput:
4968 // Indirect outputs just consume an argument.
4969 if (OpInfo.isIndirect) {
4970 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
break;
}
4974 // The return value of the call is this value. As such, there is no
4975 // corresponding argument.
4976 assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
4977 if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
4978 OpVT = TLI.getValueType(STy->getElementType(ResNo));
} else {
4980 assert(ResNo == 0 && "Asm only has one result!");
4981 OpVT = TLI.getValueType(CS.getType());
}
++ResNo;
break;
4985 case InlineAsm::isInput:
4986 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
break;
4988 case InlineAsm::isClobber:
break;
}
4993 // If this is an input or an indirect output, process the call argument.
4994 // BasicBlocks are labels, currently appearing only in asm's.
4995 if (OpInfo.CallOperandVal) {
4996 if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
4997 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
} else {
4999 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
}
5002 OpVT = OpInfo.getCallOperandValMVT(TLI, TD);
}
5005 OpInfo.ConstraintVT = OpVT;
}
5008 // Second pass over the constraints: compute which constraint option to use
5009 // and assign registers to constraints that want a specific physreg.
5010 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5011 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5013 // If this is an output operand with a matching input operand, look up the
5014 // matching input. If their types mismatch, e.g. one is an integer, the
5015 // other is floating point, or their sizes are different, flag it as an error.
5017 if (OpInfo.hasMatchingInput()) {
5018 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
5019 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
5020 if ((OpInfo.ConstraintVT.isInteger() !=
5021 Input.ConstraintVT.isInteger()) ||
5022 (OpInfo.ConstraintVT.getSizeInBits() !=
5023 Input.ConstraintVT.getSizeInBits())) {
5024 cerr << "Unsupported asm: input constraint with a matching output "
5025 << "constraint of incompatible type!\n";
5028 Input.ConstraintVT = OpInfo.ConstraintVT;
5032 // Compute the constraint code and ConstraintType to use.
5033 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
5035 // If this is a memory input, and if the operand is not indirect, do what we
5036 // need to in order to provide an address for the memory input.
5037 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5038 !OpInfo.isIndirect) {
5039 assert(OpInfo.Type == InlineAsm::isInput &&
5040 "Can only indirectify direct input operands!");
5042 // Memory operands really want the address of the value. If we don't have
5043 // an indirect input, put it in the constpool if we can, otherwise spill
5044 // it to a stack slot.
5046 // If the operand is a float, integer, or vector constant, spill to a
5047 // constant pool entry to get its address.
5048 Value *OpVal = OpInfo.CallOperandVal;
5049 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
5050 isa<ConstantVector>(OpVal)) {
5051 OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
5052 TLI.getPointerTy());
} else {
5054 // Otherwise, create a stack slot and emit a store to it before the asm.
5056 const Type *Ty = OpVal->getType();
5057 uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
5058 unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
5059 MachineFunction &MF = DAG.getMachineFunction();
5060 int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
5061 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
5062 Chain = DAG.getStore(Chain, getCurDebugLoc(),
5063 OpInfo.CallOperand, StackSlot, NULL, 0);
5064 OpInfo.CallOperand = StackSlot;
}
5067 // There is no longer a Value* corresponding to this operand.
5068 OpInfo.CallOperandVal = 0;
5069 // It is now an indirect operand.
5070 OpInfo.isIndirect = true;
}
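// Rough example of the indirectification above: for a direct memory input
// such as asm("fldl %0" : : "m"(3.0)), the ConstantFP operand becomes a
// constant-pool address, while a non-constant value gets a fresh stack slot,
// a store of the value into that slot, and the slot's address as the operand.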
5073 // If this constraint is for a specific register, allocate it before anything else.
5075 if (OpInfo.ConstraintType == TargetLowering::C_Register)
5076 GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
}
5078 ConstraintInfos.clear();
5081 // Third pass - Loop over all of the operands, assigning virtual or physregs
5082 // to register class operands.
5083 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5084 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5086 // C_Register operands have already been allocated, Other/Memory don't need to be.
5088 if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
5089 GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
}
5092 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
5093 std::vector<SDValue> AsmNodeOperands;
5094 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
5095 AsmNodeOperands.push_back(
5096 DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other));
5099 // Loop over all of the inputs, copying the operand values into the
5100 // appropriate registers and processing the output regs.
5101 RegsForValue RetValRegs;
5103 // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
5104 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
5106 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5107 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5109 switch (OpInfo.Type) {
5110 case InlineAsm::isOutput: {
5111 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
5112 OpInfo.ConstraintType != TargetLowering::C_Register) {
5113 // Memory output, or 'other' output (e.g. 'X' constraint).
5114 assert(OpInfo.isIndirect && "Memory output must be indirect operand");
5116 // Add information to the INLINEASM node to know about this output.
5117 unsigned ResOpType = 4/*MEM*/ | (1<<3);
5118 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5119 TLI.getPointerTy()));
5120 AsmNodeOperands.push_back(OpInfo.CallOperand);
break;
}
5124 // Otherwise, this is a register or register class output.
5126 // Copy the output from the appropriate register. Find a register that we can use.
5128 if (OpInfo.AssignedRegs.Regs.empty()) {
5129 cerr << "Couldn't allocate output reg for constraint '"
5130 << OpInfo.ConstraintCode << "'!\n";
exit(1);
}
5134 // If this is an indirect operand, store through the pointer after the asm.
5136 if (OpInfo.isIndirect) {
5137 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
5138 OpInfo.CallOperandVal));
} else {
5140 // This is the result value of the call.
5141 assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
5142 // Concatenate this output onto the outputs list.
5143 RetValRegs.append(OpInfo.AssignedRegs);
}
5146 // Add information to the INLINEASM node to know that this register is set.
5148 OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
5149 6 /* EARLYCLOBBER REGDEF */ :
2 /* REGDEF */ ,
5151 DAG, AsmNodeOperands);
break;
}
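// Summary of the flag words pushed above and below, as this function encodes
// them: the low 3 bits give the operand kind (1 = REGUSE, 2 = REGDEF,
// 3 = IMM, 4 = MEM, 6 = EARLYCLOBBER REGDEF) and the remaining bits give the
// number of following operands, e.g. a single-register REGDEF is 2 | (1<<3)
// and a memory operand is 4 | (1<<3).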
5154 case InlineAsm::isInput: {
5155 SDValue InOperandVal = OpInfo.CallOperand;
5157 if (OpInfo.isMatchingInputConstraint()) { // Matching constraint?
5158 // If this is required to match an output register we have already set,
5159 // just use its register.
5160 unsigned OperandNo = OpInfo.getMatchedOperand();
5162 // Scan until we find the definition we already emitted of this operand.
5163 // When we find it, create a RegsForValue operand.
5164 unsigned CurOp = 2; // The first operand.
5165 for (; OperandNo; --OperandNo) {
5166 // Advance to the next operand.
unsigned NumOps =
5168 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5169 assert(((NumOps & 7) == 2 /*REGDEF*/ ||
5170 (NumOps & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
5171 (NumOps & 7) == 4 /*MEM*/) &&
5172 "Skipped past definitions?");
5173 CurOp += (NumOps>>3)+1;
}
unsigned NumOps =
5177 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5178 if ((NumOps & 7) == 2 /*REGDEF*/
5179 || (NumOps & 7) == 6 /* EARLYCLOBBER REGDEF */) {
5180 // Add NumOps>>3 registers to MatchedRegs.
5181 RegsForValue MatchedRegs;
5182 MatchedRegs.TLI = &TLI;
5183 MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
5184 MatchedRegs.RegVTs.push_back(AsmNodeOperands[CurOp+1].getValueType());
5185 for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
unsigned Reg =
5187 cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
5188 MatchedRegs.Regs.push_back(Reg);
}
5191 // Use the produced MatchedRegs object to copy the value into the asm's registers.
5192 MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
Chain, &Flag);
5194 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
break;
} else {
5197 assert(((NumOps & 7) == 4) && "Unknown matching constraint!");
5198 assert((NumOps >> 3) == 1 && "Unexpected number of operands");
5199 // Add information to the INLINEASM node to know about this input.
5200 AsmNodeOperands.push_back(DAG.getTargetConstant(NumOps,
5201 TLI.getPointerTy()));
5202 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
break;
}
}
5207 if (OpInfo.ConstraintType == TargetLowering::C_Other) {
5208 assert(!OpInfo.isIndirect &&
5209 "Don't know how to handle indirect other inputs yet!");
5211 std::vector<SDValue> Ops;
5212 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
5213 hasMemory, Ops, DAG);
if (Ops.empty()) {
5215 cerr << "Invalid operand for inline asm constraint '"
5216 << OpInfo.ConstraintCode << "'!\n";
exit(1);
}
5220 // Add information to the INLINEASM node to know about this input.
5221 unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
5222 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5223 TLI.getPointerTy()));
5224 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
break;
5226 } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
5227 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
5228 assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
5229 "Memory operands expect pointer values");
5231 // Add information to the INLINEASM node to know about this input.
5232 unsigned ResOpType = 4/*MEM*/ | (1<<3);
5233 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5234 TLI.getPointerTy()));
5235 AsmNodeOperands.push_back(InOperandVal);
break;
}
5239 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
5240 OpInfo.ConstraintType == TargetLowering::C_Register) &&
5241 "Unknown constraint type!");
5242 assert(!OpInfo.isIndirect &&
5243 "Don't know how to handle indirect register inputs yet!");
5245 // Copy the input into the appropriate registers.
5246 if (OpInfo.AssignedRegs.Regs.empty()) {
5247 cerr << "Couldn't allocate input reg for constraint '"
5248 << OpInfo.ConstraintCode << "'!\n";
exit(1);
}
5252 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
Chain, &Flag);
5255 OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/,
5256 DAG, AsmNodeOperands);
break;
}
5259 case InlineAsm::isClobber: {
5260 // Add the clobbered value to the operand list, so that the register
5261 // allocator is aware that the physreg got clobbered.
5262 if (!OpInfo.AssignedRegs.Regs.empty())
5263 OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
5264 DAG, AsmNodeOperands);
break;
}
}
}
5270 // Finish up input operands.
5271 AsmNodeOperands[0] = Chain;
5272 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
5274 Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
5275 DAG.getNodeValueTypes(MVT::Other, MVT::Flag), 2,
5276 &AsmNodeOperands[0], AsmNodeOperands.size());
5277 Flag = Chain.getValue(1);
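// A sketch of the INLINEASM node built above: operand 0 is the chain,
// operand 1 is the asm string, then each constraint contributes a flag word
// followed by its registers, address, or immediates, and the incoming glue
// value (if any) comes last.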
5279 // If this asm returns a register value, copy the result from that register
5280 // and set it as the value of the call.
5281 if (!RetValRegs.Regs.empty()) {
5282 SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
Chain, &Flag);
5285 // FIXME: Why don't we do this for inline asms with MRVs?
5286 if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
5287 MVT ResultType = TLI.getValueType(CS.getType());
5289 // If any of the results of the inline asm is a vector, it may have the
5290 // wrong width/num elts. This can happen for register classes that can
5291 // contain multiple different value types. The preg or vreg allocated may
5292 // not have the same VT as was expected. Convert it to the right type
5293 // with bit_convert.
5294 if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
5295 Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
ResultType, Val);
5298 } else if (ResultType != Val.getValueType() &&
5299 ResultType.isInteger() && Val.getValueType().isInteger()) {
5300 // If a result value was tied to an input value, the computed result may
5301 // have a wider width than the expected result. Extract the relevant portion.
5303 Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
}
5306 assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
}
5309 setValue(CS.getInstruction(), Val);
}
5312 std::vector<std::pair<SDValue, Value*> > StoresToEmit;
5314 // Process indirect outputs, first output all of the flagged copies out of the physregs.
5316 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
5317 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
5318 Value *Ptr = IndirectStoresToEmit[i].second;
5319 SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
Chain, &Flag);
5321 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
}
5324 // Emit the non-flagged stores from the physregs.
5325 SmallVector<SDValue, 8> OutChains;
5326 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
5327 OutChains.push_back(DAG.getStore(Chain, getCurDebugLoc(),
5328 StoresToEmit[i].first,
5329 getValue(StoresToEmit[i].second),
5330 StoresToEmit[i].second, 0));
5331 if (!OutChains.empty())
5332 Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
5333 &OutChains[0], OutChains.size());
DAG.setRoot(Chain);
}
5338 void SelectionDAGLowering::visitMalloc(MallocInst &I) {
5339 SDValue Src = getValue(I.getOperand(0));
5341 MVT IntPtr = TLI.getPointerTy();
5343 if (IntPtr.bitsLT(Src.getValueType()))
5344 Src = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), IntPtr, Src);
5345 else if (IntPtr.bitsGT(Src.getValueType()))
5346 Src = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), IntPtr, Src);
5348 // Scale the source by the type size.
5349 uint64_t ElementSize = TD->getTypePaddedSize(I.getType()->getElementType());
5350 Src = DAG.getNode(ISD::MUL, getCurDebugLoc(), Src.getValueType(),
5351 Src, DAG.getIntPtrConstant(ElementSize));
5353 TargetLowering::ArgListTy Args;
5354 TargetLowering::ArgListEntry Entry;
Entry.Node = Src;
5356 Entry.Ty = TLI.getTargetData()->getIntPtrType();
5357 Args.push_back(Entry);
5359 std::pair<SDValue,SDValue> Result =
5360 TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, false,
5361 CallingConv::C, PerformTailCallOpt,
5362 DAG.getExternalSymbol("malloc", IntPtr),
5363 Args, DAG, getCurDebugLoc());
5364 setValue(&I, Result.first); // Pointers always fit in registers
5365 DAG.setRoot(Result.second);
}
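// For example, assuming a 32-bit target where getTypePaddedSize(double) is 8,
// a "malloc double, i32 %n" instruction is lowered to %n * 8 followed by a
// call to the external "malloc" symbol with that product as the argument.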
5368 void SelectionDAGLowering::visitFree(FreeInst &I) {
5369 TargetLowering::ArgListTy Args;
5370 TargetLowering::ArgListEntry Entry;
5371 Entry.Node = getValue(I.getOperand(0));
5372 Entry.Ty = TLI.getTargetData()->getIntPtrType();
5373 Args.push_back(Entry);
5374 MVT IntPtr = TLI.getPointerTy();
5375 std::pair<SDValue,SDValue> Result =
5376 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, false, false,
5377 CallingConv::C, PerformTailCallOpt,
5378 DAG.getExternalSymbol("free", IntPtr), Args, DAG,
getCurDebugLoc());
5380 DAG.setRoot(Result.second);
}
5383 void SelectionDAGLowering::visitVAStart(CallInst &I) {
5384 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
5385 MVT::Other, getRoot(),
5386 getValue(I.getOperand(1)),
5387 DAG.getSrcValue(I.getOperand(1))));
}
5390 void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
5391 SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
5392 getValue(I.getOperand(0)),
5393 DAG.getSrcValue(I.getOperand(0)));
setValue(&I, V);
5395 DAG.setRoot(V.getValue(1));
}
5398 void SelectionDAGLowering::visitVAEnd(CallInst &I) {
5399 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
5400 MVT::Other, getRoot(),
5401 getValue(I.getOperand(1)),
5402 DAG.getSrcValue(I.getOperand(1))));
}
5405 void SelectionDAGLowering::visitVACopy(CallInst &I) {
5406 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
5407 MVT::Other, getRoot(),
5408 getValue(I.getOperand(1)),
5409 getValue(I.getOperand(2)),
5410 DAG.getSrcValue(I.getOperand(1)),
5411 DAG.getSrcValue(I.getOperand(2))));
}
5414 /// TargetLowering::LowerArguments - This is the default LowerArguments
5415 /// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
5416 /// targets are migrated to using FORMAL_ARGUMENTS, this hook should be
5417 /// integrated into SDISel.
5418 void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
5419 SmallVectorImpl<SDValue> &ArgValues,
DebugLoc dl) {
5421 // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
5422 SmallVector<SDValue, 3+16> Ops;
5423 Ops.push_back(DAG.getRoot());
5424 Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
5425 Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));
5427 // Add one result value for each formal argument.
5428 SmallVector<MVT, 16> RetVals;
unsigned j = 1;
5430 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
I != E; ++I, ++j) {
5432 SmallVector<MVT, 4> ValueVTs;
5433 ComputeValueVTs(*this, I->getType(), ValueVTs);
5434 for (unsigned Value = 0, NumValues = ValueVTs.size();
5435 Value != NumValues; ++Value) {
5436 MVT VT = ValueVTs[Value];
5437 const Type *ArgTy = VT.getTypeForMVT();
5438 ISD::ArgFlagsTy Flags;
5439 unsigned OriginalAlignment =
5440 getTargetData()->getABITypeAlignment(ArgTy);
5442 if (F.paramHasAttr(j, Attribute::ZExt))
Flags.setZExt();
5444 if (F.paramHasAttr(j, Attribute::SExt))
Flags.setSExt();
5446 if (F.paramHasAttr(j, Attribute::InReg))
Flags.setInReg();
5448 if (F.paramHasAttr(j, Attribute::StructRet))
Flags.setSRet();
5450 if (F.paramHasAttr(j, Attribute::ByVal)) {
Flags.setByVal();
5452 const PointerType *Ty = cast<PointerType>(I->getType());
5453 const Type *ElementTy = Ty->getElementType();
5454 unsigned FrameAlign = getByValTypeAlignment(ElementTy);
5455 unsigned FrameSize = getTargetData()->getTypePaddedSize(ElementTy);
5456 // For ByVal, alignment should be passed from FE. BE will guess if
5457 // this info is not there but there are cases it cannot get right.
5458 if (F.getParamAlignment(j))
5459 FrameAlign = F.getParamAlignment(j);
5460 Flags.setByValAlign(FrameAlign);
5461 Flags.setByValSize(FrameSize);
}
5463 if (F.paramHasAttr(j, Attribute::Nest))
Flags.setNest();
5465 Flags.setOrigAlign(OriginalAlignment);
5467 MVT RegisterVT = getRegisterType(VT);
5468 unsigned NumRegs = getNumRegisters(VT);
5469 for (unsigned i = 0; i != NumRegs; ++i) {
5470 RetVals.push_back(RegisterVT);
5471 ISD::ArgFlagsTy MyFlags = Flags;
5472 if (NumRegs > 1 && i == 0)
MyFlags.setSplit();
5474 // if it isn't first piece, alignment must be 1
else if (i > 0)
5476 MyFlags.setOrigAlign(1);
5477 Ops.push_back(DAG.getArgFlags(MyFlags));
}
}
}
5482 RetVals.push_back(MVT::Other);
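// For example, an i64 argument on a target where i64 is not a legal type is
// expanded by the loop above into two i32 register values: the first part is
// marked split and keeps the original ABI alignment, while the later parts
// have their original alignment forced to 1.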
5485 SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, dl,
5486 DAG.getVTList(&RetVals[0], RetVals.size()),
5487 &Ops[0], Ops.size()).getNode();
5489 // Prelower FORMAL_ARGUMENTS. This isn't required for functionality, but
5490 // allows exposing the loads that may be part of the argument access to the
5491 // first DAGCombiner pass.
5492 SDValue TmpRes = LowerOperation(SDValue(Result, 0), DAG);
5494 // The number of results should match up, except that the lowered one may have
5495 // an extra flag result.
5496 assert((Result->getNumValues() == TmpRes.getNode()->getNumValues() ||
5497 (Result->getNumValues()+1 == TmpRes.getNode()->getNumValues() &&
5498 TmpRes.getValue(Result->getNumValues()).getValueType() == MVT::Flag))
5499 && "Lowering produced unexpected number of results!");
5501 // The FORMAL_ARGUMENTS node itself is likely no longer needed.
5502 if (Result != TmpRes.getNode() && Result->use_empty()) {
5503 HandleSDNode Dummy(DAG.getRoot());
5504 DAG.RemoveDeadNode(Result);
}
5507 Result = TmpRes.getNode();
5509 unsigned NumArgRegs = Result->getNumValues() - 1;
5510 DAG.setRoot(SDValue(Result, NumArgRegs));
5512 // Set up the return result vector.
unsigned i = 0;
unsigned Idx = 1;
5515 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
++I, ++Idx) {
5517 SmallVector<MVT, 4> ValueVTs;
5518 ComputeValueVTs(*this, I->getType(), ValueVTs);
5519 for (unsigned Value = 0, NumValues = ValueVTs.size();
5520 Value != NumValues; ++Value) {
5521 MVT VT = ValueVTs[Value];
5522 MVT PartVT = getRegisterType(VT);
5524 unsigned NumParts = getNumRegisters(VT);
5525 SmallVector<SDValue, 4> Parts(NumParts);
5526 for (unsigned j = 0; j != NumParts; ++j)
5527 Parts[j] = SDValue(Result, i++);
5529 ISD::NodeType AssertOp = ISD::DELETED_NODE;
5530 if (F.paramHasAttr(Idx, Attribute::SExt))
5531 AssertOp = ISD::AssertSext;
5532 else if (F.paramHasAttr(Idx, Attribute::ZExt))
5533 AssertOp = ISD::AssertZext;
5535 ArgValues.push_back(getCopyFromParts(DAG, dl, &Parts[0], NumParts,
5536 PartVT, VT, AssertOp));
}
}
5539 assert(i == NumArgRegs && "Argument register count mismatch!");
}
5543 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
5544 /// implementation, which just inserts an ISD::CALL node, which is later custom
5545 /// lowered by the target to something concrete. FIXME: When all targets are
5546 /// migrated to using ISD::CALL, this hook should be integrated into SDISel.
5547 std::pair<SDValue, SDValue>
5548 TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
5549 bool RetSExt, bool RetZExt, bool isVarArg,
bool isInreg,
5551 unsigned CallingConv, bool isTailCall,
SDValue Callee,
5553 ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) {
5554 assert((!isTailCall || PerformTailCallOpt) &&
5555 "isTailCall set when tail-call optimizations are disabled!");
5557 SmallVector<SDValue, 32> Ops;
5558 Ops.push_back(Chain); // Op#0 - Chain
5559 Ops.push_back(Callee);
5561 // Handle all of the outgoing arguments.
5562 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
5563 SmallVector<MVT, 4> ValueVTs;
5564 ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
5565 for (unsigned Value = 0, NumValues = ValueVTs.size();
5566 Value != NumValues; ++Value) {
5567 MVT VT = ValueVTs[Value];
5568 const Type *ArgTy = VT.getTypeForMVT();
5569 SDValue Op = SDValue(Args[i].Node.getNode(),
5570 Args[i].Node.getResNo() + Value);
5571 ISD::ArgFlagsTy Flags;
5572 unsigned OriginalAlignment =
5573 getTargetData()->getABITypeAlignment(ArgTy);
if (Args[i].isZExt)
Flags.setZExt();
if (Args[i].isSExt)
Flags.setSExt();
5579 if (Args[i].isInReg)
Flags.setInReg();
if (Args[i].isSRet)
Flags.setSRet();
5583 if (Args[i].isByVal) {
Flags.setByVal();
5585 const PointerType *Ty = cast<PointerType>(Args[i].Ty);
5586 const Type *ElementTy = Ty->getElementType();
5587 unsigned FrameAlign = getByValTypeAlignment(ElementTy);
5588 unsigned FrameSize = getTargetData()->getTypePaddedSize(ElementTy);
5589 // For ByVal, alignment should come from FE. BE will guess if this
5590 // info is not there but there are cases it cannot get right.
5591 if (Args[i].Alignment)
5592 FrameAlign = Args[i].Alignment;
5593 Flags.setByValAlign(FrameAlign);
5594 Flags.setByValSize(FrameSize);
}
if (Args[i].isNest)
Flags.setNest();
5598 Flags.setOrigAlign(OriginalAlignment);
5600 MVT PartVT = getRegisterType(VT);
5601 unsigned NumParts = getNumRegisters(VT);
5602 SmallVector<SDValue, 4> Parts(NumParts);
5603 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
if (Args[i].isSExt)
5606 ExtendKind = ISD::SIGN_EXTEND;
5607 else if (Args[i].isZExt)
5608 ExtendKind = ISD::ZERO_EXTEND;
5610 getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind);
5612 for (unsigned i = 0; i != NumParts; ++i) {
5613 // if it isn't first piece, alignment must be 1
5614 ISD::ArgFlagsTy MyFlags = Flags;
5615 if (NumParts > 1 && i == 0)
MyFlags.setSplit();
5617 // if it isn't first piece, alignment must be 1
else if (i > 0)
5618 MyFlags.setOrigAlign(1);
5620 Ops.push_back(Parts[i]);
5621 Ops.push_back(DAG.getArgFlags(MyFlags));
}
}
}
5626 // Figure out the result value types. We start by making a list of
5627 // the potentially illegal return value types.
5628 SmallVector<MVT, 4> LoweredRetTys;
5629 SmallVector<MVT, 4> RetTys;
5630 ComputeValueVTs(*this, RetTy, RetTys);
5632 // Then we translate that to a list of legal types.
5633 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
MVT VT = RetTys[I];
5635 MVT RegisterVT = getRegisterType(VT);
5636 unsigned NumRegs = getNumRegisters(VT);
5637 for (unsigned i = 0; i != NumRegs; ++i)
5638 LoweredRetTys.push_back(RegisterVT);
}
5641 LoweredRetTys.push_back(MVT::Other); // Always has a chain.
5643 // Create the CALL node.
5644 SDValue Res = DAG.getCall(CallingConv, dl,
5645 isVarArg, isTailCall, isInreg,
5646 DAG.getVTList(&LoweredRetTys[0],
5647 LoweredRetTys.size()),
&Ops[0], Ops.size());
5650 Chain = Res.getValue(LoweredRetTys.size() - 1);
5652 // Gather up the call result into a single value.
5653 if (RetTy != Type::VoidTy && !RetTys.empty()) {
5654 ISD::NodeType AssertOp = ISD::DELETED_NODE;
if (RetSExt)
5657 AssertOp = ISD::AssertSext;
else if (RetZExt)
5659 AssertOp = ISD::AssertZext;
5661 SmallVector<SDValue, 4> ReturnValues;
unsigned RegNo = 0;
5663 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
MVT VT = RetTys[I];
5665 MVT RegisterVT = getRegisterType(VT);
5666 unsigned NumRegs = getNumRegisters(VT);
5667 unsigned RegNoEnd = NumRegs + RegNo;
5668 SmallVector<SDValue, 4> Results;
5669 for (; RegNo != RegNoEnd; ++RegNo)
5670 Results.push_back(Res.getValue(RegNo));
5671 SDValue ReturnValue =
5672 getCopyFromParts(DAG, dl, &Results[0], NumRegs, RegisterVT, VT,
AssertOp);
5674 ReturnValues.push_back(ReturnValue);
}
5676 Res = DAG.getNode(ISD::MERGE_VALUES, dl,
5677 DAG.getVTList(&RetTys[0], RetTys.size()),
5678 &ReturnValues[0], ReturnValues.size());
}
5681 return std::make_pair(Res, Chain);
}
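// Sketch of the result gathering above: a call returning {i32, i64} on a
// 32-bit target has RetTys = {i32, i64}; the i64 legalizes to two i32
// registers, so the CALL node carries three value results plus the chain.
// getCopyFromParts rebuilds the i64 from its two parts, and the final result
// is a MERGE_VALUES of the i32 and the reassembled i64.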
5684 void TargetLowering::LowerOperationWrapper(SDNode *N,
5685 SmallVectorImpl<SDValue> &Results,
5686 SelectionDAG &DAG) {
5687 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
if (Res.getNode())
5689 Results.push_back(Res);
}
5692 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
5693 assert(0 && "LowerOperation not implemented for this target!");
return SDValue();
}
5699 void SelectionDAGLowering::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
5700 SDValue Op = getValue(V);
5701 assert((Op.getOpcode() != ISD::CopyFromReg ||
5702 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
5703 "Copy from a reg to the same reg!");
5704 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
5706 RegsForValue RFV(TLI, Reg, V->getType());
5707 SDValue Chain = DAG.getEntryNode();
5708 RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0);
5709 PendingExports.push_back(Chain);
}
5712 #include "llvm/CodeGen/SelectionDAGISel.h"
5714 void SelectionDAGISel::
5715 LowerArguments(BasicBlock *LLVMBB) {
5716 // If this is the entry block, emit arguments.
5717 Function &F = *LLVMBB->getParent();
5718 SDValue OldRoot = SDL->DAG.getRoot();
5719 SmallVector<SDValue, 16> Args;
5720 TLI.LowerArguments(F, SDL->DAG, Args, SDL->getCurDebugLoc());
unsigned a = 0;
5723 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
AI != E; ++AI) {
5725 SmallVector<MVT, 4> ValueVTs;
5726 ComputeValueVTs(TLI, AI->getType(), ValueVTs);
5727 unsigned NumValues = ValueVTs.size();
5728 if (!AI->use_empty()) {
5729 SDL->setValue(AI, SDL->DAG.getMergeValues(&Args[a], NumValues));
5730 // If this argument is live outside of the entry block, insert a copy from
5731 // wherever we got it to the vreg that other BBs will reference it as.
5732 DenseMap<const Value*, unsigned>::iterator VMI=FuncInfo->ValueMap.find(AI);
5733 if (VMI != FuncInfo->ValueMap.end()) {
5734 SDL->CopyValueToVirtualRegister(AI, VMI->second);
}
}
a += NumValues;
}
5740 // Finally, if the target has anything special to do, allow it to do so.
5741 // FIXME: this should insert code into the DAG!
5742 EmitFunctionEntryCode(F, SDL->DAG.getMachineFunction());
}
5745 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
5746 /// ensure constants are generated when needed. Remember the virtual registers
5747 /// that need to be added to the Machine PHI nodes as input. We cannot just
5748 /// directly add them, because expansion might result in multiple MBB's for one
5749 /// BB. As such, the start of the BB might correspond to a different MBB than
/// the end.
void
5753 SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
5754 TerminatorInst *TI = LLVMBB->getTerminator();
5756 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5758 // Check successor nodes' PHI nodes that expect a constant to be available from this block.
5760 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5761 BasicBlock *SuccBB = TI->getSuccessor(succ);
5762 if (!isa<PHINode>(SuccBB->begin())) continue;
5763 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5765 // If this terminator has multiple identical successors (common for
5766 // switches), only handle each succ once.
5767 if (!SuccsHandled.insert(SuccMBB)) continue;
5769 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
PHINode *PN;
5772 // At this point we know that there is a 1-1 correspondence between LLVM PHI
5773 // nodes and Machine PHI nodes, but the incoming operands have not been emitted yet.
5775 for (BasicBlock::iterator I = SuccBB->begin();
5776 (PN = dyn_cast<PHINode>(I)); ++I) {
5777 // Ignore dead phi's.
5778 if (PN->use_empty()) continue;
unsigned Reg;
5781 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
5783 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
5784 unsigned &RegOut = SDL->ConstantsOut[C];
if (RegOut == 0) {
5786 RegOut = FuncInfo->CreateRegForValue(C);
5787 SDL->CopyValueToVirtualRegister(C, RegOut);
}
Reg = RegOut;
} else {
5791 Reg = FuncInfo->ValueMap[PHIOp];
if (Reg == 0) {
5793 assert(isa<AllocaInst>(PHIOp) &&
5794 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
5795 "Didn't codegen value into a register!??");
5796 Reg = FuncInfo->CreateRegForValue(PHIOp);
5797 SDL->CopyValueToVirtualRegister(PHIOp, Reg);
}
}
5801 // Remember that this register needs to be added to the machine PHI node as
5802 // the input for this MBB.
5803 SmallVector<MVT, 4> ValueVTs;
5804 ComputeValueVTs(TLI, PN->getType(), ValueVTs);
5805 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
5806 MVT VT = ValueVTs[vti];
5807 unsigned NumRegisters = TLI.getNumRegisters(VT);
5808 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
5809 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
5810 Reg += NumRegisters;
}
}
}
5814 SDL->ConstantsOut.clear();
}
5817 /// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
5818 /// supports legal types, and it emits MachineInstrs directly instead of
5819 /// creating SelectionDAG nodes.
bool
5822 SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
FastISel *F) {
5824 TerminatorInst *TI = LLVMBB->getTerminator();
5826 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5827 unsigned OrigNumPHINodesToUpdate = SDL->PHINodesToUpdate.size();
5829 // Check successor nodes' PHI nodes that expect a constant to be available from this block.
5831 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5832 BasicBlock *SuccBB = TI->getSuccessor(succ);
5833 if (!isa<PHINode>(SuccBB->begin())) continue;
5834 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5836 // If this terminator has multiple identical successors (common for
5837 // switches), only handle each succ once.
5838 if (!SuccsHandled.insert(SuccMBB)) continue;
5840 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
PHINode *PN;
5843 // At this point we know that there is a 1-1 correspondence between LLVM PHI
5844 // nodes and Machine PHI nodes, but the incoming operands have not been emitted yet.
5846 for (BasicBlock::iterator I = SuccBB->begin();
5847 (PN = dyn_cast<PHINode>(I)); ++I) {
5848 // Ignore dead phi's.
5849 if (PN->use_empty()) continue;
5851 // Only handle legal types. Two interesting things to note here. First,
5852 // by bailing out early, we may leave behind some dead instructions,
5853 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
5854 // own moves. Second, this check is necessary because FastISel doesn't
5855 // use CreateRegForValue to create registers, so it always creates
5856 // exactly one register for each non-void instruction.
5857 MVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
5858 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
// Promote MVT::i1.
if (VT == MVT::i1)
5861 VT = TLI.getTypeToTransformTo(VT);
else {
5863 SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
return false;
}
}
5868 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
5870 unsigned Reg = F->getRegForValue(PHIOp);
if (Reg == 0) {
5872 SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
return false;
}
5875 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));