//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "isel"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineDebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include <map>
#include <set>
#include <iostream>
#include <algorithm>
using namespace llvm;

#ifndef NDEBUG
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
             cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
              cl::desc("Pop up a window to show sched dags as they are processed"));
#else
static const bool ViewISelDAGs = 0, ViewSchedDAGs = 0;
#endif
64 "no-isel-fold-inflight",
66 cl::desc("Do not attempt to fold a node even if it is being selected"));
namespace {
  // Scheduling heuristics
  enum SchedHeuristics {
    defaultScheduling,      // Let the target specify its preference.
    noScheduling,           // No scheduling, emit breadth first sequence.
    simpleScheduling,       // Two pass, min. critical path, max. utilization.
    simpleNoItinScheduling, // Same as above except using generic latency.
    listSchedulingBURR,     // Bottom up reg reduction list scheduling.
    listSchedulingTD        // Top-down list scheduler.
  };

  cl::opt<SchedHeuristics>
  ISHeuristic(
    "sched",
    cl::desc("Choose scheduling style"),
    cl::init(defaultScheduling),
    cl::values(
      clEnumValN(defaultScheduling, "default",
                 "Target preferred scheduling style"),
      clEnumValN(noScheduling, "none",
                 "No scheduling: breadth first sequencing"),
      clEnumValN(simpleScheduling, "simple",
                 "Simple two pass scheduling: minimize critical path "
                 "and maximize processor utilization"),
      clEnumValN(simpleNoItinScheduling, "simple-noitin",
                 "Simple two pass scheduling: Same as simple "
                 "except using generic latency"),
      clEnumValN(listSchedulingBURR, "list-burr",
                 "Bottom up register reduction list scheduling"),
      clEnumValN(listSchedulingTD, "list-td",
                 "Top-down list scheduler"),
      clEnumValEnd));
} // end anonymous namespace
namespace {
  /// RegsForValue - This struct represents the physical registers that a
  /// particular value is assigned and the type information about the value.
  /// This is needed because values can be promoted into larger registers and
  /// expanded into multiple registers that are smaller than the value.
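  /// For example, on a 32-bit target a legal i32 value uses one register
  /// (RegVT == ValueVT == i32), a promoted i16 lives in an i32 register
  /// (RegVT == i32, ValueVT == i16), and an expanded i64 occupies two i32
  /// registers (two entries in Regs, RegVT == i32, ValueVT == i64).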
  struct RegsForValue {
    /// Regs - This list holds the register (for legal and promoted values)
    /// or register set (for expanded values) that the value should be assigned
    /// to.
    std::vector<unsigned> Regs;

    /// RegVT - The value type of each register.
    MVT::ValueType RegVT;

    /// ValueVT - The value type of the LLVM value, which may be promoted from
    /// RegVT or made from merging the two expanded parts.
    MVT::ValueType ValueVT;

    RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {}

    RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt)
      : RegVT(regvt), ValueVT(valuevt) {
      Regs.push_back(Reg);
    }
    RegsForValue(const std::vector<unsigned> &regs,
                 MVT::ValueType regvt, MVT::ValueType valuevt)
      : Regs(regs), RegVT(regvt), ValueVT(valuevt) {
    }

    /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
    /// this value and returns the result as a ValueVT value.  This uses
    /// Chain/Flag as the input and updates them for the output Chain/Flag.
    SDOperand getCopyFromRegs(SelectionDAG &DAG,
                              SDOperand &Chain, SDOperand &Flag) const;

    /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
    /// specified value into the registers specified by this object.  This uses
    /// Chain/Flag as the input and updates them for the output Chain/Flag.
    void getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
                       SDOperand &Chain, SDOperand &Flag) const;

    /// AddInlineAsmOperands - Add this value to the specified inlineasm node
    /// operand list.  This adds the code marker and includes the number of
    /// values added into it.
    void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
                              std::vector<SDOperand> &Ops) const;
  };
} // end anonymous namespace

namespace llvm {
//===--------------------------------------------------------------------===//
/// FunctionLoweringInfo - This contains information that is global to a
/// function that is used when lowering a region of the function.
class FunctionLoweringInfo {
public:
  TargetLowering &TLI;
  Function &Fn;
  MachineFunction &MF;
  SSARegMap *RegMap;

  FunctionLoweringInfo(TargetLowering &TLI, Function &Fn, MachineFunction &MF);

  /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
  std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;

  /// ValueMap - Since we emit code for the function a basic block at a time,
  /// we must remember which virtual registers hold the values for
  /// cross-basic-block values.
  std::map<const Value*, unsigned> ValueMap;

  /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
  /// the entry block.  This allows the allocas to be efficiently referenced
  /// anywhere in the function.
  std::map<const AllocaInst*, int> StaticAllocaMap;

  unsigned MakeReg(MVT::ValueType VT) {
    return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
  }

  unsigned CreateRegForValue(const Value *V);

  unsigned InitializeRegForValue(const Value *V) {
    unsigned &R = ValueMap[V];
    assert(R == 0 && "Already initialized this value register!");
    return R = CreateRegForValue(V);
  }
};
/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
        isa<SwitchInst>(*UI))
      return true;
  return false;
}

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true.  This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(Argument *A) {
  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false;  // Use not in entry block.
  return true;
}
FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
                                           Function &fn, MachineFunction &mf)
    : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers.  This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::iterator BB = Fn.begin(), EB = Fn.end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
                   AI->getAlignment());

        // If the alignment of the value is smaller than the size of the value,
        // and if the size of the value is particularly small (<= 8 bytes),
        // round up to the size of the value for potentially better performance.
        //
        // FIXME: This could be made better with a preferred alignment hook in
        // TargetData.  It serves primarily to 8-byte align doubles for X86.
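        // For example, on X86 a double has TySize == 8 but a default ABI
        // alignment of 4; bumping Align up to 8 here keeps such loads and
        // stores naturally aligned.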
        if (Align < TySize && TySize <= 8) Align = TySize;
        TySize *= CUI->getValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
      }

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF.getBasicBlockList().push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        MVT::ValueType VT = TLI.getValueType(PN->getType());
        unsigned NumElements;
        if (VT != MVT::Vector)
          NumElements = TLI.getNumElements(VT);
        else {
          MVT::ValueType VT1, VT2;
          NumElements =
            TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
                                       VT1, VT2);
        }
        unsigned PHIReg = ValueMap[PN];
        assert(PHIReg && "PHI node does not have an assigned virtual register!");
        for (unsigned i = 0; i != NumElements; ++i)
          BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
      }
  }
}
/// CreateRegForValue - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types.  Assign these registers
/// consecutive vreg numbers and return the first assigned number.
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
  MVT::ValueType VT = TLI.getValueType(V->getType());

  // The number of register multiples we need, e.g. to split up
  // a <2 x int64> into 4 x i32 registers.
  unsigned NumVectorRegs = 1;

  // If this is a packed type, figure out what type it will decompose into
  // and how many of the elements it will use.
  if (VT == MVT::Vector) {
    const PackedType *PTy = cast<PackedType>(V->getType());
    unsigned NumElts = PTy->getNumElements();
    MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType());

    // Divide the input until we get to a supported size.  This will always
    // end with a scalar if the target doesn't support vectors.
    while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) {
      NumElts >>= 1;
      NumVectorRegs <<= 1;
    }

    VT = getVectorType(EltTy, NumElts);
  }

  // The common case is that we will only create one register for this
  // value.  If we have that case, create and return the virtual register.
  unsigned NV = TLI.getNumElements(VT);
  if (NV == 1) {
    // If we are promoting this value, pick the next largest supported type.
    MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT);
    unsigned Reg = MakeReg(PromotedType);
    // If this is a vector of supported or promoted types (e.g. 4 x i16),
    // create all of the registers.
    for (unsigned i = 1; i != NumVectorRegs; ++i)
      MakeReg(PromotedType);
    return Reg;
  }

  // If this value is represented with multiple target registers, make sure
  // to create enough consecutive registers of the right (smaller) type.
  unsigned NT = VT-1;  // Find the type to use.
  while (TLI.getNumElements((MVT::ValueType)NT) != 1)
    --NT;

  unsigned R = MakeReg((MVT::ValueType)NT);
  for (unsigned i = 1; i != NV*NumVectorRegs; ++i)
    MakeReg((MVT::ValueType)NT);
  return R;
}
//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
/// Also, targets can overload any lowering method.
///
class SelectionDAGLowering {
  MachineBasicBlock *CurMBB;

  std::map<const Value*, SDOperand> NodeMap;

  /// PendingLoads - Loads are not emitted to the program immediately.  We bunch
  /// them up and then emit token factor nodes when possible.  This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
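  /// For example, two non-volatile loads with no intervening store can both
  /// be chained to the entry token; the next call to getRoot() then merges
  /// their output chains with a single TokenFactor node.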
  std::vector<SDOperand> PendingLoads;

  /// Case - A pair of values to record the Value for a switch case, and the
  /// case's target basic block.
  typedef std::pair<Constant*, MachineBasicBlock*> Case;
  typedef std::vector<Case>::iterator CaseItr;
  typedef std::pair<CaseItr, CaseItr> CaseRange;

  /// CaseRec - A struct with ctor used in lowering switches to a binary tree
  /// of conditional branches.
  struct CaseRec {
    CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) :
      CaseBB(bb), LT(lt), GE(ge), Range(r) {}

    /// CaseBB - The MBB in which to emit the compare and branch
    MachineBasicBlock *CaseBB;
    /// LT, GE - If nonzero, we know the current case value must be less-than or
    /// greater-than-or-equal-to these Constants.
    Constant *LT;
    Constant *GE;
    /// Range - A pair of iterators representing the range of case values to be
    /// processed at this point in the binary search tree.
    CaseRange Range;
  };

  /// The comparison function for sorting Case values.
  struct CaseCmp {
    bool operator () (const Case& C1, const Case& C2) {
      if (const ConstantUInt* U1 = dyn_cast<const ConstantUInt>(C1.first))
        return U1->getValue() < cast<const ConstantUInt>(C2.first)->getValue();

      const ConstantSInt* S1 = dyn_cast<const ConstantSInt>(C1.first);
      return S1->getValue() < cast<const ConstantSInt>(C2.first)->getValue();
    }
  };

public:
  // TLI - This is information that describes the available target features we
  // need for lowering.  This indicates when operations are unavailable,
  // implemented with a libcall, etc.
  TargetLowering &TLI;
  SelectionDAG &DAG;
  const TargetData &TD;

  /// SwitchCases - Vector of CaseBlock structures used to communicate
  /// SwitchInst code generation information.
  std::vector<SelectionDAGISel::CaseBlock> SwitchCases;
  SelectionDAGISel::JumpTable JT;

  /// FuncInfo - Information about the function as a whole.
  FunctionLoweringInfo &FuncInfo;

  SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
                       FunctionLoweringInfo &funcinfo)
    : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
      JT(0,0,0,0), FuncInfo(funcinfo) {
  }

  /// getRoot - Return the current virtual root of the Selection DAG.
  ///
  SDOperand getRoot() {
    if (PendingLoads.empty())
      return DAG.getRoot();

    if (PendingLoads.size() == 1) {
      SDOperand Root = PendingLoads[0];
      DAG.setRoot(Root);
      PendingLoads.clear();
      return Root;
    }

    // Otherwise, we have to make a token factor node.
    SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, PendingLoads);
    PendingLoads.clear();
    DAG.setRoot(Root);
    return Root;
  }

  void visit(Instruction &I) { visit(I.getOpcode(), I); }

  void visit(unsigned Opcode, User &I) {
    switch (Opcode) {
    default: assert(0 && "Unknown instruction type encountered!");
             abort();
      // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
    }
  }

  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }

  SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr,
                        SDOperand SrcValue, SDOperand Root,
                        bool isVolatile);

  SDOperand getIntPtrConstant(uint64_t Val) {
    return DAG.getConstant(Val, TLI.getPointerTy());
  }

  SDOperand getValue(const Value *V);

  const SDOperand &setValue(const Value *V, SDOperand NewN) {
    SDOperand &N = NodeMap[V];
    assert(N.Val == 0 && "Already set a value for this node!");
    return N = NewN;
  }

  RegsForValue GetRegistersForValue(const std::string &ConstrCode,
                                    MVT::ValueType VT,
                                    bool OutReg, bool InReg,
                                    std::set<unsigned> &OutputRegs,
                                    std::set<unsigned> &InputRegs);
  // Terminator instructions.
  void visitRet(ReturnInst &I);
  void visitBr(BranchInst &I);
  void visitSwitch(SwitchInst &I);
  void visitUnreachable(UnreachableInst &I) { /* noop */ }

  // Helper for visitSwitch
  void visitSwitchCase(SelectionDAGISel::CaseBlock &CB);
  void visitJumpTable(SelectionDAGISel::JumpTable &JT);

  // These all get lowered before this pass.
  void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
  void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }

  void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp);
  void visitShift(User &I, unsigned Opcode);
  void visitAdd(User &I) {
    visitBinary(I, ISD::ADD, ISD::FADD, ISD::VADD);
  }
  void visitSub(User &I);
  void visitMul(User &I) {
    visitBinary(I, ISD::MUL, ISD::FMUL, ISD::VMUL);
  }
  void visitDiv(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I,
                Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV,
                Ty->isSigned() ? ISD::VSDIV : ISD::VUDIV);
  }
  void visitRem(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0);
  }
  void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, ISD::VAND); }
  void visitOr (User &I) { visitBinary(I, ISD::OR,  0, ISD::VOR); }
  void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, ISD::VXOR); }
  void visitShl(User &I) { visitShift(I, ISD::SHL); }
  void visitShr(User &I) {
    visitShift(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);
  }

  void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc);
  void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ); }
  void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE); }
  void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE); }
  void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE); }
  void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); }
  void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); }

  void visitExtractElement(User &I);
  void visitInsertElement(User &I);
  void visitShuffleVector(User &I);

  void visitGetElementPtr(User &I);
  void visitCast(User &I);
  void visitSelect(User &I);

  void visitMalloc(MallocInst &I);
  void visitFree(FreeInst &I);
  void visitAlloca(AllocaInst &I);
  void visitLoad(LoadInst &I);
  void visitStore(StoreInst &I);
  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
  void visitCall(CallInst &I);
  void visitInlineAsm(CallInst &I);
  const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);

  void visitVAStart(CallInst &I);
  void visitVAArg(VAArgInst &I);
  void visitVAEnd(CallInst &I);
  void visitVACopy(CallInst &I);
  void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);

  void visitMemIntrinsic(CallInst &I, unsigned Op);

  void visitUserOp1(Instruction &I) {
    assert(0 && "UserOp1 should not exist at instruction selection time!");
    abort();
  }
  void visitUserOp2(Instruction &I) {
    assert(0 && "UserOp2 should not exist at instruction selection time!");
    abort();
  }
};
} // end namespace llvm
SDOperand SelectionDAGLowering::getValue(const Value *V) {
  SDOperand &N = NodeMap[V];
  if (N.Val) return N;

  const Type *VTy = V->getType();
  MVT::ValueType VT = TLI.getValueType(VTy);
  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      assert(N.Val && "visit didn't populate the ValueMap!");
      return N;
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
      return N = DAG.getGlobalAddress(GV, VT);
    } else if (isa<ConstantPointerNull>(C)) {
      return N = DAG.getConstant(0, TLI.getPointerTy());
    } else if (isa<UndefValue>(C)) {
      if (!isa<PackedType>(VTy))
        return N = DAG.getNode(ISD::UNDEF, VT);

      // Create a VBUILD_VECTOR of undef nodes.
      const PackedType *PTy = cast<PackedType>(VTy);
      unsigned NumElements = PTy->getNumElements();
      MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());

      std::vector<SDOperand> Ops;
      Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT));

      // Create a VConstant node with generic Vector type.
      Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
      Ops.push_back(DAG.getValueType(PVT));
      return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
    } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
      return N = DAG.getConstantFP(CFP->getValue(), VT);
    } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
      unsigned NumElements = PTy->getNumElements();
      MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());

      // Now that we know the number and type of the elements, push a
      // Constant or ConstantFP node onto the ops list for each element of
      // the packed constant.
      std::vector<SDOperand> Ops;
      if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
        for (unsigned i = 0; i != NumElements; ++i)
          Ops.push_back(getValue(CP->getOperand(i)));
      } else {
        assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
        SDOperand Op;
        if (MVT::isFloatingPoint(PVT))
          Op = DAG.getConstantFP(0, PVT);
        else
          Op = DAG.getConstant(0, PVT);
        Ops.assign(NumElements, Op);
      }

      // Create a VBUILD_VECTOR node with generic Vector type.
      Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
      Ops.push_back(DAG.getValueType(PVT));
      return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
    } else {
      // Canonicalize all constant ints to be unsigned.
      return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(), VT);
    }
  }

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    std::map<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
  }

  std::map<const Value*, unsigned>::const_iterator VMI =
    FuncInfo.ValueMap.find(V);
  assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");

  unsigned InReg = VMI->second;

  // If this type is not legal, make it so now.
  if (VT != MVT::Vector) {
    MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);

    N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
    if (DestVT < VT) {
      // Source must be expanded.  This input value is actually coming from the
      // register pair VMI->second and VMI->second+1.
      N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
                      DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
    } else if (DestVT > VT) { // Promotion case
      if (MVT::isFloatingPoint(VT))
        N = DAG.getNode(ISD::FP_ROUND, VT, N);
      else
        N = DAG.getNode(ISD::TRUNCATE, VT, N);
    }
  } else {
    // Otherwise, if this is a vector, make it available as a generic vector
    // here.
    MVT::ValueType PTyElementVT, PTyLegalElementVT;
    const PackedType *PTy = cast<PackedType>(VTy);
    unsigned NE = TLI.getPackedTypeBreakdown(PTy, PTyElementVT,
                                             PTyLegalElementVT);

    // Build a VBUILD_VECTOR with the input registers.
    std::vector<SDOperand> Ops;
    if (PTyElementVT == PTyLegalElementVT) {
      // If the value types are legal, just VBUILD the CopyFromReg nodes.
      for (unsigned i = 0; i != NE; ++i)
        Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                         PTyElementVT));
    } else if (PTyElementVT < PTyLegalElementVT) {
      // If the register was promoted, use TRUNCATE or FP_ROUND as appropriate.
      for (unsigned i = 0; i != NE; ++i) {
        SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                          PTyLegalElementVT);
        if (MVT::isFloatingPoint(PTyElementVT))
          Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op);
        else
          Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op);
        Ops.push_back(Op);
      }
    } else {
      // If the register was expanded, use BUILD_PAIR.
      assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!");
      for (unsigned i = 0; i != NE/2; ++i) {
        SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                           PTyLegalElementVT);
        SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                           PTyLegalElementVT);
        Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1));
      }
    }

    Ops.push_back(DAG.getConstant(NE, MVT::i32));
    Ops.push_back(DAG.getValueType(PTyLegalElementVT));
    N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);

    // Finally, use a VBIT_CONVERT to make this available as the appropriate
    // vector type.
    N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
                    DAG.getConstant(PTy->getNumElements(),
                                    MVT::i32),
                    DAG.getValueType(TLI.getValueType(PTy->getElementType())));
  }

  return N;
}
void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
    return;
  }
  std::vector<SDOperand> NewValues;
  NewValues.push_back(getRoot());
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SDOperand RetOp = getValue(I.getOperand(i));

    // If this is an integer return value, we need to promote it ourselves to
    // the full width of a register, since LegalizeOp will use ANY_EXTEND rather
    // than sign/zero extend based on the value's signedness.
    if (MVT::isInteger(RetOp.getValueType()) &&
        RetOp.getValueType() < MVT::i64) {
      MVT::ValueType TmpVT;
      if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
        TmpVT = TLI.getTypeToTransformTo(MVT::i32);
      else
        TmpVT = MVT::i32;

      if (I.getOperand(i)->getType()->isSigned())
        RetOp = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, RetOp);
      else
        RetOp = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, RetOp);
    }
    NewValues.push_back(RetOp);
  }
  DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, NewValues));
}
void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
  CurMBB->addSuccessor(Succ0MBB);

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Succ0MBB)));
  } else {
    MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
    CurMBB->addSuccessor(Succ1MBB);

    SDOperand Cond = getValue(I.getCondition());
    if (Succ1MBB == NextBlock) {
      // If the condition is false, fall through.  This means we should branch
      // if the condition is true to Succ #0.
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
                              Cond, DAG.getBasicBlock(Succ0MBB)));
    } else if (Succ0MBB == NextBlock) {
      // If the condition is true, fall through.  This means we should branch if
      // the condition is false to Succ #1.  Invert the condition first.
      SDOperand True = DAG.getConstant(1, Cond.getValueType());
      Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
                              Cond, DAG.getBasicBlock(Succ1MBB)));
    } else {
      std::vector<SDOperand> Ops;
      Ops.push_back(getRoot());
      // If the false case is the current basic block, then this is a self
      // loop.  We do not want to emit "Loop: ... brcond Out; br Loop", as it
      // adds an extra instruction in the loop.  Instead, invert the
      // condition and emit "Loop: ... br!cond Loop; br Out".
      if (CurMBB == Succ1MBB) {
        std::swap(Succ0MBB, Succ1MBB);
        SDOperand True = DAG.getConstant(1, Cond.getValueType());
        Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
      }
      SDOperand True = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
                                   DAG.getBasicBlock(Succ0MBB));
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, True,
                              DAG.getBasicBlock(Succ1MBB)));
    }
  }
}
/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) {
  SDOperand SwitchOp = getValue(CB.SwitchV);
  SDOperand CaseOp = getValue(CB.CaseC);
  SDOperand Cond = DAG.getSetCC(MVT::i1, SwitchOp, CaseOp, CB.CC);

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.LHSBB == NextBlock) {
    std::swap(CB.LHSBB, CB.RHSBB);
    SDOperand True = DAG.getConstant(1, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
  }
  SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
                                 DAG.getBasicBlock(CB.LHSBB));
  if (CB.RHSBB == NextBlock)
    DAG.setRoot(BrCond);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
                            DAG.getBasicBlock(CB.RHSBB)));
  // Update successor info
  CurMBB->addSuccessor(CB.LHSBB);
  CurMBB->addSuccessor(CB.RHSBB);
}
/// visitJumpTable - Emits the necessary code to jump through the jump table
/// built for a switch instruction.
void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) {
  // FIXME: Need to emit different code for PIC vs. Non-PIC, specifically,
  // we need to add the address of the jump table to the value loaded, since
  // the entries in the jump table will be differences rather than absolute
  // addresses.

  // Emit the code for the jump table
  MVT::ValueType PTy = TLI.getPointerTy();
  unsigned PTyBytes = MVT::getSizeInBits(PTy)/8;
  SDOperand Copy = DAG.getCopyFromReg(getRoot(), JT.Reg, PTy);
  SDOperand IDX = DAG.getNode(ISD::MUL, PTy, Copy,
                              DAG.getConstant(PTyBytes, PTy));
  SDOperand ADD = DAG.getNode(ISD::ADD, PTy, IDX, DAG.getJumpTable(JT.JTI,PTy));
  SDOperand LD  = DAG.getLoad(PTy, Copy.getValue(1), ADD, DAG.getSrcValue(0));
  DAG.setRoot(DAG.getNode(ISD::BRIND, MVT::Other, LD.getValue(1), LD));

  // Update successor info
  for (std::set<MachineBasicBlock*>::iterator ii = JT.SuccMBBs.begin(),
       ee = JT.SuccMBBs.end(); ii != ee; ++ii)
    JT.MBB->addSuccessor(*ii);
}
void SelectionDAGLowering::visitSwitch(SwitchInst &I) {
  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  // If there is only the default destination, branch to it if it is not the
  // next basic block.  Otherwise, just fall through.
  if (I.getNumOperands() == 2) {
    // Update machine-CFG edges.
    MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[I.getDefaultDest()];
    // If this is not a fall-through branch, emit the branch.
    if (DefaultMBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(DefaultMBB)));
    CurMBB->addSuccessor(DefaultMBB);
    return;
  }

  // If there are any non-default case statements, create a vector of Cases
  // representing each one, and sort the vector so that we can efficiently
  // create a binary search tree from them.
  std::vector<Case> Cases;
  for (unsigned i = 1; i < I.getNumSuccessors(); ++i) {
    MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)];
    Cases.push_back(Case(I.getSuccessorValue(i), SMBB));
  }
  std::sort(Cases.begin(), Cases.end(), CaseCmp());

  // Get the Value to be switched on and default basic blocks, which will be
  // inserted into CaseBlock records, representing basic blocks in the binary
  // search tree.
  Value *SV = I.getOperand(0);
  MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()];

  // Get the MachineFunction which holds the current MBB.  This is used during
  // emission of jump tables, and when inserting any additional MBBs necessary
  // to represent the switch.
  MachineFunction *CurMF = CurMBB->getParent();
  const BasicBlock *LLVMBB = CurMBB->getBasicBlock();
  Reloc::Model Relocs = TLI.getTargetMachine().getRelocationModel();

  // If the switch has more than 3 blocks, and is 100% dense, then emit a jump
  // table rather than lowering the switch to a binary tree of conditional
  // branches.
  // FIXME: Make this work with 64 bit targets someday, possibly by always
  // doing differences there so that entries stay 32 bits.
  // FIXME: Make this work with PIC code
  if (TLI.isOperationLegal(ISD::BRIND, TLI.getPointerTy()) &&
      TLI.getPointerTy() == MVT::i32 &&
      (Relocs == Reloc::Static || Relocs == Reloc::DynamicNoPIC) &&
      Cases.size() > 3) {
    uint64_t First = cast<ConstantIntegral>(Cases.front().first)->getRawValue();
    uint64_t Last  = cast<ConstantIntegral>(Cases.back().first)->getRawValue();

    // FIXME: support sub-100% density
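    // For example, cases {1,2,3,4} give Last-First+1 == 4 == Cases.size(),
    // i.e. a 100% dense switch that maps directly to a 4-entry jump table.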
    if (((Last - First) + 1ULL) == (uint64_t)Cases.size()) {
      // Create a new basic block to hold the code for loading the address
      // of the jump table, and jumping to it.  Update successor information;
      // we will either branch to the default case for the switch, or the jump
      // table.
      MachineBasicBlock *JumpTableBB = new MachineBasicBlock(LLVMBB);
      CurMF->getBasicBlockList().insert(BBI, JumpTableBB);
      CurMBB->addSuccessor(Default);
      CurMBB->addSuccessor(JumpTableBB);

      // Subtract the lowest switch case value from the value being switched on
      // and conditional branch to default mbb if the result is greater than the
      // difference between smallest and largest cases.
      SDOperand SwitchOp = getValue(SV);
      MVT::ValueType VT = SwitchOp.getValueType();
      SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp,
                                  DAG.getConstant(First, VT));

      // The SDNode we just created, which holds the value being switched on
      // minus the smallest case value, needs to be copied to a virtual
      // register so it can be used as an index into the jump table in a
      // subsequent basic block.  This value may be smaller or larger than the
      // target's pointer type, and therefore require extension or truncating.
      if (VT > TLI.getPointerTy())
        SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB);
      else
        SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB);
      unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
      SDOperand CopyTo = DAG.getCopyToReg(getRoot(), JumpTableReg, SwitchOp);

      // Emit the range check for the jump table, and branch to the default
      // block for the switch statement if the value being switched on exceeds
      // the largest case in the switch.
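      // Note that a single unsigned SETUGT suffices: values below First wrap
      // around in the subtract above and also compare greater than Last-First,
      // so both out-of-range directions reach the default block.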
      SDOperand CMP = DAG.getSetCC(TLI.getSetCCResultTy(), SUB,
                                   DAG.getConstant(Last-First,VT), ISD::SETUGT);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP,
                              DAG.getBasicBlock(Default)));

      // Build a sorted vector of destination BBs, corresponding to each target
      // of the jump table.
      // FIXME: need to insert DefaultMBB for each "hole" in the jump table,
      // when we support jump tables with < 100% density.
      std::set<MachineBasicBlock*> UniqueBBs;
      std::vector<MachineBasicBlock*> DestBBs;
      for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++ii) {
        DestBBs.push_back(ii->second);
        UniqueBBs.insert(ii->second);
      }
      unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);

      // Set the jump table information so that we can codegen it as a second
      // MachineBasicBlock.
      JT.Reg = JumpTableReg;
      JT.JTI = JTI;
      JT.MBB = JumpTableBB;
      JT.Default = Default;
      JT.SuccMBBs = UniqueBBs;
      return;
    }
  }

  // Push the initial CaseRec onto the worklist
  std::vector<CaseRec> CaseVec;
  CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));

  while (!CaseVec.empty()) {
    // Grab a record representing a case range to process off the worklist
    CaseRec CR = CaseVec.back();
    CaseVec.pop_back();

    // Size is the number of Cases represented by this range.  If Size is 1,
    // then we are processing a leaf of the binary search tree.  Otherwise,
    // we need to pick a pivot, and push left and right ranges onto the
    // worklist.
    unsigned Size = CR.Range.second - CR.Range.first;
    if (Size == 1) {
      // Create a CaseBlock record representing a conditional branch to
      // the Case's target mbb if the value being switched on SV is equal
      // to C.  Otherwise, branch to default.
      Constant *C = CR.Range.first->first;
      MachineBasicBlock *Target = CR.Range.first->second;
      SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default,
                                     CR.CaseBB);
      // If the MBB representing the leaf node is the current MBB, then just
      // call visitSwitchCase to emit the code into the current block.
      // Otherwise, push the CaseBlock onto the vector to be later processed
      // by SDISel, and insert the node's MBB before the next MBB.
      if (CR.CaseBB == CurMBB)
        visitSwitchCase(CB);
      else {
        SwitchCases.push_back(CB);
        CurMF->getBasicBlockList().insert(BBI, CR.CaseBB);
      }
    } else {
      // split case range at pivot
      CaseItr Pivot = CR.Range.first + (Size / 2);
      CaseRange LHSR(CR.Range.first, Pivot);
      CaseRange RHSR(Pivot, CR.Range.second);
      Constant *C = Pivot->first;
      MachineBasicBlock *RHSBB = 0, *LHSBB = 0;
      // We know that we branch to the LHS if the Value being switched on is
      // less than the Pivot value, C.  We use this to optimize our binary
      // tree a bit, by recognizing that if SV is greater than or equal to the
      // LHS's Case Value, and that Case Value is exactly one less than the
      // Pivot's Value, then we can branch directly to the LHS's Target,
      // rather than creating a leaf node for it.
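      // For example, if CR.GE == 2 and the pivot value is 3, a one-element
      // LHS range holding case value 2 needs no compare: any value already
      // known to be >= 2 and < 3 must be exactly 2.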
      if ((LHSR.second - LHSR.first) == 1 &&
          LHSR.first->first == CR.GE &&
          cast<ConstantIntegral>(C)->getRawValue() ==
          (cast<ConstantIntegral>(CR.GE)->getRawValue() + 1ULL)) {
        LHSBB = LHSR.first->second;
      } else {
        LHSBB = new MachineBasicBlock(LLVMBB);
        CaseVec.push_back(CaseRec(LHSBB,C,CR.GE,LHSR));
      }
      // Similar to the optimization above, if the Value being switched on is
      // known to be less than the Constant CR.LT, and the current Case Value
      // is CR.LT - 1, then we can branch directly to the target block for
      // the current Case Value, rather than emitting a RHS leaf node for it.
      if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
          cast<ConstantIntegral>(RHSR.first->first)->getRawValue() ==
          (cast<ConstantIntegral>(CR.LT)->getRawValue() - 1ULL)) {
        RHSBB = RHSR.first->second;
      } else {
        RHSBB = new MachineBasicBlock(LLVMBB);
        CaseVec.push_back(CaseRec(RHSBB,CR.LT,C,RHSR));
      }
      // Create a CaseBlock record representing a conditional branch to
      // the LHS node if the value being switched on SV is less than C.
      // Otherwise, branch to RHS.
      ISD::CondCode CC = C->getType()->isSigned() ? ISD::SETLT : ISD::SETULT;
      SelectionDAGISel::CaseBlock CB(CC, SV, C, LHSBB, RHSBB, CR.CaseBB);
      if (CR.CaseBB == CurMBB)
        visitSwitchCase(CB);
      else {
        SwitchCases.push_back(CB);
        CurMF->getBasicBlockList().insert(BBI, CR.CaseBB);
      }
    }
  }
}
void SelectionDAGLowering::visitSub(User &I) {
  // -0.0 - X --> fneg
  if (I.getType()->isFloatingPoint()) {
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
      if (CFP->isExactlyValue(-0.0)) {
        SDOperand Op2 = getValue(I.getOperand(1));
        setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
        return;
      }
  }
  visitBinary(I, ISD::SUB, ISD::FSUB, ISD::VSUB);
}
void SelectionDAGLowering::visitBinary(User &I, unsigned IntOp, unsigned FPOp,
                                       unsigned VecOp) {
  const Type *Ty = I.getType();
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  if (Ty->isIntegral()) {
    setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
  } else if (Ty->isFloatingPoint()) {
    setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
  } else {
    const PackedType *PTy = cast<PackedType>(Ty);
    SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
    SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
    setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
  }
}
void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);

  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
}

void SelectionDAGLowering::visitSetCC(User &I, ISD::CondCode SignedOpcode,
                                      ISD::CondCode UnsignedOpcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode = SignedOpcode;
  if (I.getOperand(0)->getType()->isUnsigned())
    Opcode = UnsignedOpcode;
  setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
}
void SelectionDAGLowering::visitSelect(User &I) {
  SDOperand Cond     = getValue(I.getOperand(0));
  SDOperand TrueVal  = getValue(I.getOperand(1));
  SDOperand FalseVal = getValue(I.getOperand(2));
  if (!isa<PackedType>(I.getType())) {
    setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
                             TrueVal, FalseVal));
  } else {
    setValue(&I, DAG.getNode(ISD::VSELECT, MVT::Vector, Cond, TrueVal, FalseVal,
                             *(TrueVal.Val->op_end()-2),
                             *(TrueVal.Val->op_end()-1)));
  }
}
void SelectionDAGLowering::visitCast(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType SrcVT = N.getValueType();
  MVT::ValueType DestVT = TLI.getValueType(I.getType());

  if (DestVT == MVT::Vector) {
    // This is a cast to a vector from something else.  This is always a bit
    // convert.  Get information about the input vector.
    const PackedType *DestTy = cast<PackedType>(I.getType());
    MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
    setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N,
                             DAG.getConstant(DestTy->getNumElements(),MVT::i32),
                             DAG.getValueType(EltVT)));
  } else if (SrcVT == DestVT) {
    setValue(&I, N);  // noop cast.
  } else if (DestVT == MVT::i1) {
    // Cast to bool is a comparison against zero, not truncation to zero.
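    // For example, casting the i32 value 256 to bool must yield true even
    // though its low bit is zero; SETNE against zero gets this right where a
    // plain TRUNCATE would not.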
    SDOperand Zero = isInteger(SrcVT) ? DAG.getConstant(0, N.getValueType()) :
                                        DAG.getConstantFP(0.0, N.getValueType());
    setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
  } else if (isInteger(SrcVT)) {
    if (isInteger(DestVT)) {        // Int -> Int cast
      if (DestVT < SrcVT)   // Truncating cast?
        setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
      else if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
    } else if (isFloatingPoint(DestVT)) {   // Int -> FP cast
      if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
    } else {
      assert(0 && "Unknown cast!");
    }
  } else if (isFloatingPoint(SrcVT)) {
    if (isFloatingPoint(DestVT)) {  // FP -> FP cast
      if (DestVT < SrcVT)   // Rounding cast?
        setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
    } else if (isInteger(DestVT)) {        // FP -> Int cast.
      if (I.getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
    } else {
      assert(0 && "Unknown cast!");
    }
  } else {
    assert(SrcVT == MVT::Vector && "Unknown cast!");
    assert(DestVT != MVT::Vector && "Casts to vector already handled!");
    // This is a cast from a vector to something else.  This is always a bit
    // convert.  Get information about the input vector.
    setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N));
  }
}
void SelectionDAGLowering::visitInsertElement(User &I) {
  SDOperand InVec = getValue(I.getOperand(0));
  SDOperand InVal = getValue(I.getOperand(1));
  SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
                                getValue(I.getOperand(2)));

  SDOperand Num = *(InVec.Val->op_end()-2);
  SDOperand Typ = *(InVec.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector,
                           InVec, InVal, InIdx, Num, Typ));
}

void SelectionDAGLowering::visitExtractElement(User &I) {
  SDOperand InVec = getValue(I.getOperand(0));
  SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
                                getValue(I.getOperand(1)));
  SDOperand Typ = *(InVec.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT,
                           TLI.getValueType(I.getType()), InVec, InIdx));
}

void SelectionDAGLowering::visitShuffleVector(User &I) {
  SDOperand V1   = getValue(I.getOperand(0));
  SDOperand V2   = getValue(I.getOperand(1));
  SDOperand Mask = getValue(I.getOperand(2));

  SDOperand Num = *(V1.Val->op_end()-2);
  SDOperand Typ = *(V2.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector,
                           V1, V2, Mask, Num, Typ));
}
void SelectionDAGLowering::visitGetElementPtr(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  const Type *Ty = I.getOperand(0)->getType();
  const Type *UIntPtrTy = TD.getIntPtrType();

  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantUInt>(Idx)->getValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offset = TD.getStructLayout(StTy)->MemberOffsets[Field];
        N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                        getIntPtrConstant(Offset));
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getRawValue() == 0) continue;

        uint64_t Offs;
        if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
          Offs = (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
        else
          Offs = TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeSize(Ty);
      SDOperand IdxN = getValue(Idx);

      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.
      if (IdxN.getValueType() < N.getValueType()) {
        if (Idx->getType()->isSigned())
          IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
        else
          IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN);
      } else if (IdxN.getValueType() > N.getValueType())
        IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);

      // If this is a multiply by a power of two, turn it into a shl
      // immediately.  This is a very common case.
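      // For example, a GEP into an array of doubles (ElementSize == 8)
      // becomes IdxN << 3 instead of IdxN * 8.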
      if (isPowerOf2_64(ElementSize)) {
        unsigned Amt = Log2_64(ElementSize);
        IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
                           DAG.getConstant(Amt, TLI.getShiftAmountTy()));
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
        continue;
      }

      SDOperand Scale = getIntPtrConstant(ElementSize);
      IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
      N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
    }
  }
  setValue(&I, N);
}
void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  const Type *Ty = I.getAllocatedType();
  uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
  unsigned Align = std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
                            I.getAlignment());

  SDOperand AllocSize = getValue(I.getArraySize());
  MVT::ValueType IntPtr = TLI.getPointerTy();
  if (IntPtr < AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
  else if (IntPtr > AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);

  AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
                          getIntPtrConstant(TySize));

  // Handle alignment.  If the requested alignment is less than or equal to the
  // stack alignment, ignore it and round the size of the allocation up to the
  // stack alignment size.  If the size is greater than the stack alignment, we
  // note this in the DYNAMIC_STACKALLOC node.
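  // In effect: AllocSize = (AllocSize + StackAlign-1) & ~(StackAlign-1);
  // with a 16-byte stack alignment, a 20-byte request rounds up to 32 bytes.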
  unsigned StackAlign =
    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
  if (Align <= StackAlign) {
    Align = 0;
    // Add SA-1 to the size.
    AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(StackAlign-1));
    // Mask out the low bits for alignment purposes.
    AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(~(uint64_t)(StackAlign-1)));
  }

  std::vector<MVT::ValueType> VTs;
  VTs.push_back(AllocSize.getValueType());
  VTs.push_back(MVT::Other);
  std::vector<SDOperand> Ops;
  Ops.push_back(getRoot());
  Ops.push_back(AllocSize);
  Ops.push_back(getIntPtrConstant(Align));
  SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, Ops);
  DAG.setRoot(setValue(&I, DSA).getValue(1));

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
}
void SelectionDAGLowering::visitLoad(LoadInst &I) {
  SDOperand Ptr = getValue(I.getOperand(0));

  SDOperand Root;
  if (I.isVolatile())
    Root = getRoot();
  else {
    // Do not serialize non-volatile loads against each other.
    Root = DAG.getRoot();
  }

  setValue(&I, getLoadFrom(I.getType(), Ptr, DAG.getSrcValue(I.getOperand(0)),
                           Root, I.isVolatile()));
}

SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr,
                                            SDOperand SrcValue, SDOperand Root,
                                            bool isVolatile) {
  SDOperand L;
  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
    L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr, SrcValue);
  } else {
    L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SrcValue);
  }

  if (isVolatile)
    DAG.setRoot(L.getValue(1));
  else
    PendingLoads.push_back(L.getValue(1));

  return L;
}
void SelectionDAGLowering::visitStore(StoreInst &I) {
  Value *SrcV = I.getOperand(0);
  SDOperand Src = getValue(SrcV);
  SDOperand Ptr = getValue(I.getOperand(1));
  DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
                          DAG.getSrcValue(I.getOperand(1))));
}
/// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot
/// access memory and has no other side effects at all.
static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) {
#define GET_NO_MEMORY_INTRINSICS
#include "llvm/Intrinsics.gen"
#undef GET_NO_MEMORY_INTRINSICS
  return false;
}

// IntrinsicOnlyReadsMemory - Return true if the specified intrinsic doesn't
// have any side-effects or if it only reads memory.
static bool IntrinsicOnlyReadsMemory(unsigned IntrinsicID) {
#define GET_SIDE_EFFECT_INFO
#include "llvm/Intrinsics.gen"
#undef GET_SIDE_EFFECT_INFO
  return false;
}
/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
                                                unsigned Intrinsic) {
  bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic);
  bool OnlyLoad = HasChain && IntrinsicOnlyReadsMemory(Intrinsic);

  // Build the operand list.
  std::vector<SDOperand> Ops;
  if (HasChain) {   // If this intrinsic has side-effects, chainify it.
    if (OnlyLoad) {
      // We don't need to serialize loads against other loads.
      Ops.push_back(DAG.getRoot());
    } else {
      Ops.push_back(getRoot());
    }
  }

  // Add the intrinsic ID as an integer operand.
  Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));

  // Add all operands of the call to the operand list.
  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
    SDOperand Op = getValue(I.getOperand(i));

    // If this is a vector type, force it to the right packed type.
    if (Op.getValueType() == MVT::Vector) {
      const PackedType *OpTy = cast<PackedType>(I.getOperand(i)->getType());
      MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType());

      MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements());
      assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?");
      Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op);
    }

    assert(TLI.isTypeLegal(Op.getValueType()) &&
           "Intrinsic uses a non-legal type?");
    Ops.push_back(Op);
  }

  std::vector<MVT::ValueType> VTs;
  if (I.getType() != Type::VoidTy) {
    MVT::ValueType VT = TLI.getValueType(I.getType());
    if (VT == MVT::Vector) {
      const PackedType *DestTy = cast<PackedType>(I.getType());
      MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());

      VT = MVT::getVectorType(EltVT, DestTy->getNumElements());
      assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
    }

    assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
    VTs.push_back(VT);
  }
  if (HasChain)
    VTs.push_back(MVT::Other);

  // Create the node.
  SDOperand Result;
  if (!HasChain)
    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTs, Ops);
  else if (I.getType() != Type::VoidTy)
    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTs, Ops);
  else
    Result = DAG.getNode(ISD::INTRINSIC_VOID, VTs, Ops);

  if (HasChain) {
    SDOperand Chain = Result.getValue(Result.Val->getNumValues()-1);
    if (OnlyLoad)
      PendingLoads.push_back(Chain);
    else
      DAG.setRoot(Chain);
  }

  if (I.getType() != Type::VoidTy) {
    if (const PackedType *PTy = dyn_cast<PackedType>(I.getType())) {
      MVT::ValueType EVT = TLI.getValueType(PTy->getElementType());
      Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result,
                           DAG.getConstant(PTy->getNumElements(), MVT::i32),
                           DAG.getValueType(EVT));
    }
    setValue(&I, Result);
  }
}
/// visitIntrinsicCall - Lower the call to the specified intrinsic function.  If
/// we want to emit this as a call to a named external function, return the
/// name; otherwise lower it and return null.
const char *
SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
  switch (Intrinsic) {
  default:
    // By default, turn this into a target intrinsic node.
    visitTargetIntrinsic(I, Intrinsic);
    return 0;
  case Intrinsic::vastart:  visitVAStart(I); return 0;
  case Intrinsic::vaend:    visitVAEnd(I); return 0;
  case Intrinsic::vacopy:   visitVACopy(I); return 0;
  case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0;
  case Intrinsic::frameaddress:  visitFrameReturnAddress(I, true); return 0;
  case Intrinsic::setjmp:
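    // Note: when the target does not use an underscore prefix, the pointer
    // arithmetic "_setjmp"+1 yields "setjmp"; the bool result of the test
    // converts to 0 or 1 to select between the two spellings.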
1488 return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
1490 case Intrinsic::longjmp:
1491 return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
1493 case Intrinsic::memcpy_i32:
1494 case Intrinsic::memcpy_i64:
1495 visitMemIntrinsic(I, ISD::MEMCPY);
1497 case Intrinsic::memset_i32:
1498 case Intrinsic::memset_i64:
1499 visitMemIntrinsic(I, ISD::MEMSET);
1501 case Intrinsic::memmove_i32:
1502 case Intrinsic::memmove_i64:
1503 visitMemIntrinsic(I, ISD::MEMMOVE);
  case Intrinsic::dbg_stoppoint: {
    MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
    DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
    if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) {
      std::vector<SDOperand> Ops;

      Ops.push_back(getRoot());
      Ops.push_back(getValue(SPI.getLineValue()));
      Ops.push_back(getValue(SPI.getColumnValue()));

      DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext());
      assert(DD && "Not a debug information descriptor");
      CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);

      Ops.push_back(DAG.getString(CompileUnit->getFileName()));
      Ops.push_back(DAG.getString(CompileUnit->getDirectory()));

      DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops));
    }
    return 0;
  }
  case Intrinsic::dbg_region_start: {
    MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
    DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
    if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) {
      std::vector<SDOperand> Ops;

      unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext());

      Ops.push_back(getRoot());
      Ops.push_back(DAG.getConstant(LabelID, MVT::i32));

      DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
    }
    return 0;
  }
  case Intrinsic::dbg_region_end: {
    MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
    DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
    if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) {
      std::vector<SDOperand> Ops;

      unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext());

      Ops.push_back(getRoot());
      Ops.push_back(DAG.getConstant(LabelID, MVT::i32));

      DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
    }
    return 0;
  }
  case Intrinsic::dbg_func_start: {
    MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
    DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
    if (DebugInfo && FSI.getSubprogram() &&
        DebugInfo->Verify(FSI.getSubprogram())) {
      std::vector<SDOperand> Ops;

      unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram());

      Ops.push_back(getRoot());
      Ops.push_back(DAG.getConstant(LabelID, MVT::i32));

      DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
    }
    return 0;
  }
  case Intrinsic::dbg_declare: {
    MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
    DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
    if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) {
      std::vector<SDOperand> Ops;

      SDOperand AddressOp = getValue(DI.getAddress());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp))
        DebugInfo->RecordVariable(DI.getVariable(), FI->getIndex());
    }
    return 0;
  }
1592 case Intrinsic::isunordered_f32:
1593 case Intrinsic::isunordered_f64:
1594 setValue(&I, DAG.getSetCC(MVT::i1,getValue(I.getOperand(1)),
1595 getValue(I.getOperand(2)), ISD::SETUO));
1598 case Intrinsic::sqrt_f32:
1599 case Intrinsic::sqrt_f64:
1600 setValue(&I, DAG.getNode(ISD::FSQRT,
1601 getValue(I.getOperand(1)).getValueType(),
1602 getValue(I.getOperand(1))));
1604 case Intrinsic::pcmarker: {
1605 SDOperand Tmp = getValue(I.getOperand(1));
1606 DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
1609 case Intrinsic::readcyclecounter: {
1610 std::vector<MVT::ValueType> VTs;
1611 VTs.push_back(MVT::i64);
1612 VTs.push_back(MVT::Other);
1613 std::vector<SDOperand> Ops;
1614 Ops.push_back(getRoot());
1615 SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, VTs, Ops);
1617 DAG.setRoot(Tmp.getValue(1));
1620 case Intrinsic::bswap_i16:
1621 case Intrinsic::bswap_i32:
1622 case Intrinsic::bswap_i64:
1623 setValue(&I, DAG.getNode(ISD::BSWAP,
1624 getValue(I.getOperand(1)).getValueType(),
1625 getValue(I.getOperand(1))));
1627 case Intrinsic::cttz_i8:
1628 case Intrinsic::cttz_i16:
1629 case Intrinsic::cttz_i32:
1630 case Intrinsic::cttz_i64:
1631 setValue(&I, DAG.getNode(ISD::CTTZ,
1632 getValue(I.getOperand(1)).getValueType(),
1633 getValue(I.getOperand(1))));
1635 case Intrinsic::ctlz_i8:
1636 case Intrinsic::ctlz_i16:
1637 case Intrinsic::ctlz_i32:
1638 case Intrinsic::ctlz_i64:
1639 setValue(&I, DAG.getNode(ISD::CTLZ,
1640 getValue(I.getOperand(1)).getValueType(),
1641 getValue(I.getOperand(1))));
1643 case Intrinsic::ctpop_i8:
1644 case Intrinsic::ctpop_i16:
1645 case Intrinsic::ctpop_i32:
1646 case Intrinsic::ctpop_i64:
1647 setValue(&I, DAG.getNode(ISD::CTPOP,
1648 getValue(I.getOperand(1)).getValueType(),
1649 getValue(I.getOperand(1))));
1651 case Intrinsic::stacksave: {
1652 std::vector<MVT::ValueType> VTs;
1653 VTs.push_back(TLI.getPointerTy());
1654 VTs.push_back(MVT::Other);
1655 std::vector<SDOperand> Ops;
1656 Ops.push_back(getRoot());
1657 SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, VTs, Ops);
1659 DAG.setRoot(Tmp.getValue(1));
1662 case Intrinsic::stackrestore: {
1663 SDOperand Tmp = getValue(I.getOperand(1));
1664 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp));
1667 case Intrinsic::prefetch:
1668 // FIXME: Currently discarding prefetches.
1674 void SelectionDAGLowering::visitCall(CallInst &I) {
1675 const char *RenameFn = 0;
1676 if (Function *F = I.getCalledFunction()) {
1677 if (F->isExternal())
1678 if (unsigned IID = F->getIntrinsicID()) {
1679 RenameFn = visitIntrinsicCall(I, IID);
1682 } else { // Not an LLVM intrinsic.
1683 const std::string &Name = F->getName();
1684 if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) {
1685 if (I.getNumOperands() == 3 && // Basic sanity checks.
1686 I.getOperand(1)->getType()->isFloatingPoint() &&
1687 I.getType() == I.getOperand(1)->getType() &&
1688 I.getType() == I.getOperand(2)->getType()) {
1689 SDOperand LHS = getValue(I.getOperand(1));
1690 SDOperand RHS = getValue(I.getOperand(2));
1691 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(),
1695 } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
1696 if (I.getNumOperands() == 2 && // Basic sanity checks.
1697 I.getOperand(1)->getType()->isFloatingPoint() &&
1698 I.getType() == I.getOperand(1)->getType()) {
1699 SDOperand Tmp = getValue(I.getOperand(1));
1700 setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
1703 } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
1704 if (I.getNumOperands() == 2 && // Basic sanity checks.
1705 I.getOperand(1)->getType()->isFloatingPoint() &&
1706 I.getType() == I.getOperand(1)->getType()) {
1707 SDOperand Tmp = getValue(I.getOperand(1));
1708 setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
1711 } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
1712 if (I.getNumOperands() == 2 && // Basic sanity checks.
1713 I.getOperand(1)->getType()->isFloatingPoint() &&
1714 I.getType() == I.getOperand(1)->getType()) {
1715 SDOperand Tmp = getValue(I.getOperand(1));
1716 setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
1721 } else if (isa<InlineAsm>(I.getOperand(0))) {
1728 Callee = getValue(I.getOperand(0));
1730 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
1731 std::vector<std::pair<SDOperand, const Type*> > Args;
1732 Args.reserve(I.getNumOperands());
1733 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
1734 Value *Arg = I.getOperand(i);
1735 SDOperand ArgNode = getValue(Arg);
1736 Args.push_back(std::make_pair(ArgNode, Arg->getType()));
1739 const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
1740 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
1742 std::pair<SDOperand,SDOperand> Result =
1743 TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(),
1744 I.isTailCall(), Callee, Args, DAG);
1745 if (I.getType() != Type::VoidTy)
1746 setValue(&I, Result.first);
1747 DAG.setRoot(Result.second);
1750 SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
1751 SDOperand &Chain, SDOperand &Flag)const{
1752 SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag);
1753 Chain = Val.getValue(1);
1754 Flag = Val.getValue(2);
1756 // If the result was expanded, copy from the top part.
1757 if (Regs.size() > 1) {
1758 assert(Regs.size() == 2 &&
1759 "Cannot expand to more than 2 elts yet!");
1760 SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag);
1761 Chain = Hi.getValue(1);
1762 Flag = Hi.getValue(2);
1763 if (DAG.getTargetLoweringInfo().isLittleEndian())
1764 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi);
1766 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Hi, Val);
1769 // Otherwise, if the return value was promoted, truncate it to the
1770 // appropriate type.
1771 if (RegVT == ValueVT)
1774 if (MVT::isInteger(RegVT))
1775 return DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
1777 return DAG.getNode(ISD::FP_ROUND, ValueVT, Val);
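// Worked example (illustrative): an i64 value expanded into two i32
// registers {R0, R1} on a little-endian target is reassembled as
//   Val = CopyFromReg(R0)   // low part
//   Hi  = CopyFromReg(R1)   // high part
//   BUILD_PAIR(i64, Val, Hi)
// with the operands swapped on a big-endian target.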
1780 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
1781 /// specified value into the registers specified by this object. This uses
1782 /// Chain/Flag as the input and updates them for the output Chain/Flag.
1783 void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
1784 SDOperand &Chain, SDOperand &Flag) const {
1785 if (Regs.size() == 1) {
1786 // If there is a single register and the types differ, this must be
1787 // a promotion.
1788 if (RegVT != ValueVT) {
1789 if (MVT::isInteger(RegVT))
1790 Val = DAG.getNode(ISD::ANY_EXTEND, RegVT, Val);
1792 Val = DAG.getNode(ISD::FP_EXTEND, RegVT, Val);
1794 Chain = DAG.getCopyToReg(Chain, Regs[0], Val, Flag);
1795 Flag = Chain.getValue(1);
1797 std::vector<unsigned> R(Regs);
1798 if (!DAG.getTargetLoweringInfo().isLittleEndian())
1799 std::reverse(R.begin(), R.end());
1801 for (unsigned i = 0, e = R.size(); i != e; ++i) {
1802 SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, RegVT, Val,
1803 DAG.getConstant(i, MVT::i32));
1804 Chain = DAG.getCopyToReg(Chain, R[i], Part, Flag);
1805 Flag = Chain.getValue(1);
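// Worked example (illustrative): copying an i64 value into two i32
// registers extracts the halves explicitly,
//   Part0 = EXTRACT_ELEMENT(Val, 0)   // low half
//   Part1 = EXTRACT_ELEMENT(Val, 1)   // high half
// after reversing the register list on big-endian targets so that element
// i always lands in the register that holds part i.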
1810 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
1811 /// operand list. This adds the code marker and includes the number of
1812 /// values added into it.
1813 void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
1814 std::vector<SDOperand> &Ops) const {
1815 Ops.push_back(DAG.getConstant(Code | (Regs.size() << 3), MVT::i32));
1816 for (unsigned i = 0, e = Regs.size(); i != e; ++i)
1817 Ops.push_back(DAG.getRegister(Regs[i], RegVT));
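// Example of the operand-word encoding (added for illustration): a REGDEF
// (code 2) covering two registers is emitted as
//   2 | (2 << 3) == 18
// so the low 3 bits carry the code and the upper bits carry the count of
// register operands that follow.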
1820 /// isAllocatableRegister - If the specified register is safe to allocate,
1821 /// i.e. it isn't a stack pointer or some other special register, return the
1822 /// register class for the register. Otherwise, return null.
1823 static const TargetRegisterClass *
1824 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
1825 const TargetLowering &TLI, const MRegisterInfo *MRI) {
1826 MVT::ValueType FoundVT = MVT::Other;
1827 const TargetRegisterClass *FoundRC = 0;
1828 for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(),
1829 E = MRI->regclass_end(); RCI != E; ++RCI) {
1830 MVT::ValueType ThisVT = MVT::Other;
1832 const TargetRegisterClass *RC = *RCI;
1833 // If none of the value types for this register class are valid, we
1834 // can't use it. For example, 64-bit reg classes on 32-bit targets.
1835 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
1837 if (TLI.isTypeLegal(*I)) {
1838 // If we have already found this register in a different register class,
1839 // choose the one with the largest VT specified. For example, on
1840 // PowerPC, we favor f64 register classes over f32.
1841 if (FoundVT == MVT::Other ||
1842 MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) {
1849 if (ThisVT == MVT::Other) continue;
1851 // NOTE: This isn't ideal. In particular, this might allocate the frame
1852 // pointer in functions that need it (they are not yet taken out of the
1853 // allocation order, since a variable sized allocation hasn't been seen
1854 // yet). This is a slight code pessimization, but should still work.
1855 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
1856 E = RC->allocation_order_end(MF); I != E; ++I)
1858 // We found a matching register class. Keep looking at others in case
1859 // we find one with larger registers that this physreg is also in.
1868 RegsForValue SelectionDAGLowering::
1869 GetRegistersForValue(const std::string &ConstrCode,
1870 MVT::ValueType VT, bool isOutReg, bool isInReg,
1871 std::set<unsigned> &OutputRegs,
1872 std::set<unsigned> &InputRegs) {
1873 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
1874 TLI.getRegForInlineAsmConstraint(ConstrCode, VT);
1875 std::vector<unsigned> Regs;
1877 unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1;
1878 MVT::ValueType RegVT;
1879 MVT::ValueType ValueVT = VT;
1881 if (PhysReg.first) {
1882 if (VT == MVT::Other)
1883 ValueVT = *PhysReg.second->vt_begin();
1886 // This is an explicit reference to a physical register.
1887 Regs.push_back(PhysReg.first);
1889 // If this is an expanded reference, add the rest of the regs to Regs.
1891 RegVT = *PhysReg.second->vt_begin();
1892 TargetRegisterClass::iterator I = PhysReg.second->begin();
1893 TargetRegisterClass::iterator E = PhysReg.second->end();
1894 for (; *I != PhysReg.first; ++I)
1895 assert(I != E && "Didn't find reg!");
1897 // Already added the first reg.
1899 for (; NumRegs; --NumRegs, ++I) {
1900 assert(I != E && "Ran out of registers to allocate!");
1904 return RegsForValue(Regs, RegVT, ValueVT);
1907 // This is a reference to a register class. Allocate NumRegs consecutive,
1908 // available, registers from the class.
1909 std::vector<unsigned> RegClassRegs =
1910 TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT);
1912 const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo();
1913 MachineFunction &MF = *CurMBB->getParent();
1914 unsigned NumAllocated = 0;
1915 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
1916 unsigned Reg = RegClassRegs[i];
1917 // See if this register is available.
1918 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
1919 (isInReg && InputRegs.count(Reg))) { // Already used.
1920 // Make sure we find consecutive registers.
1925 // Check to see if this register is allocatable (i.e. don't give out the
1926 // stack pointer).
1927 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI);
1929 // Make sure we find consecutive registers.
1934 // Okay, this register is good, we can use it.
1937 // If we allocated enough consecutive registers, we are done.
1938 if (NumAllocated == NumRegs) {
1939 unsigned RegStart = (i-NumAllocated)+1;
1940 unsigned RegEnd = i+1;
1941 // Mark all of the allocated registers used.
1942 for (unsigned i = RegStart; i != RegEnd; ++i) {
1943 unsigned Reg = RegClassRegs[i];
1944 Regs.push_back(Reg);
1945 if (isOutReg) OutputRegs.insert(Reg); // Mark reg used.
1946 if (isInReg) InputRegs.insert(Reg); // Mark reg used.
1949 return RegsForValue(Regs, *RC->vt_begin(), VT);
1953 // Otherwise, we couldn't allocate enough registers for this.
1954 return RegsForValue();
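// Illustrative sketch of the two paths above (register names hypothetical):
// a physreg constraint such as "{r3}" for an i64 value on a 32-bit target
// expands to the consecutive pair {r3, r4}; a class constraint such as "r"
// instead walks RegClassRegs looking for NumRegs consecutive entries that
// are allocatable and not already claimed in OutputRegs/InputRegs.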
1958 /// visitInlineAsm - Handle a call to an InlineAsm object.
1960 void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
1961 InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));
1963 SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
1966 // Note, we treat inline asms both with and without side-effects as the same.
1967 // If an inline asm doesn't have side effects and doesn't access memory, we
1968 // could choose not to chain it.
1969 bool hasSideEffects = IA->hasSideEffects();
1971 std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
1972 std::vector<MVT::ValueType> ConstraintVTs;
1974 /// AsmNodeOperands - A list of pairs. The first element is a register, the
1975 /// second is a bitfield where bit #0 is set if it is a use and bit #1 is set
1976 /// if it is a def of that register.
1977 std::vector<SDOperand> AsmNodeOperands;
1978 AsmNodeOperands.push_back(SDOperand()); // reserve space for input chain
1979 AsmNodeOperands.push_back(AsmStr);
1981 SDOperand Chain = getRoot();
1984 // We fully assign registers here at isel time. This is not optimal, but
1985 // should work. For register classes that correspond to LLVM classes, we
1986 // could let the LLVM RA do its thing, but we currently don't. Do a prepass
1987 // over the constraints, collecting fixed registers that we know we can't use.
1988 std::set<unsigned> OutputRegs, InputRegs;
1990 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
1991 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
1992 std::string &ConstraintCode = Constraints[i].Codes[0];
1994 MVT::ValueType OpVT;
1996 // Compute the value type for each operand and add it to ConstraintVTs.
1997 switch (Constraints[i].Type) {
1998 case InlineAsm::isOutput:
1999 if (!Constraints[i].isIndirectOutput) {
2000 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2001 OpVT = TLI.getValueType(I.getType());
2003 const Type *OpTy = I.getOperand(OpNum)->getType();
2004 OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType());
2005 OpNum++; // Consumes a call operand.
2008 case InlineAsm::isInput:
2009 OpVT = TLI.getValueType(I.getOperand(OpNum)->getType());
2010 OpNum++; // Consumes a call operand.
2012 case InlineAsm::isClobber:
2017 ConstraintVTs.push_back(OpVT);
2019 if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0)
2020 continue; // Not assigned a fixed reg.
2022 // Build a list of regs that this operand uses. This always has a single
2023 // element for promoted/expanded operands.
2024 RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT,
2026 OutputRegs, InputRegs);
2028 switch (Constraints[i].Type) {
2029 case InlineAsm::isOutput:
2030 // We can't assign any other output to this register.
2031 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2032 // If this is an early-clobber output, it cannot be assigned to the same
2033 // value as the input reg.
2034 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
2035 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2037 case InlineAsm::isInput:
2038 // We can't assign any other input to this register.
2039 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2041 case InlineAsm::isClobber:
2042 // Clobbered regs cannot be used as inputs or outputs.
2043 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2044 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2049 // Loop over all of the inputs, copying the operand values into the
2050 // appropriate registers and processing the output regs.
2051 RegsForValue RetValRegs;
2052 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
2055 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
2056 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
2057 std::string &ConstraintCode = Constraints[i].Codes[0];
2059 switch (Constraints[i].Type) {
2060 case InlineAsm::isOutput: {
2061 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2062 if (ConstraintCode.size() == 1) // not a physreg name.
2063 CTy = TLI.getConstraintType(ConstraintCode[0]);
2065 if (CTy == TargetLowering::C_Memory) {
2067 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2069 // Check that the operand (the address to store to) isn't a float.
2070 if (!MVT::isInteger(InOperandVal.getValueType()))
2071 assert(0 && "MATCH FAIL!");
2073 if (!Constraints[i].isIndirectOutput)
2074 assert(0 && "MATCH FAIL!");
2076 OpNum++; // Consumes a call operand.
2078 // Extend/truncate to the right pointer type if needed.
2079 MVT::ValueType PtrType = TLI.getPointerTy();
2080 if (InOperandVal.getValueType() < PtrType)
2081 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
2082 else if (InOperandVal.getValueType() > PtrType)
2083 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
2085 // Add information to the INLINEASM node to know about this output.
2086 unsigned ResOpType = 4/*MEM*/ | (1 << 3);
2087 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2088 AsmNodeOperands.push_back(InOperandVal);
2092 // Otherwise, this is a register output.
2093 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
2095 // If this is an early-clobber output, or if there is an input
2096 // constraint that matches this, we need to reserve the input register
2097 // so no other inputs allocate to it.
2098 bool UsesInputRegister = false;
2099 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
2100 UsesInputRegister = true;
2102 // Copy the output from the appropriate register. Find a register that
2103 // we can use.
2104 RegsForValue Regs =
2105 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2106 true, UsesInputRegister,
2107 OutputRegs, InputRegs);
2108 assert(!Regs.Regs.empty() && "Couldn't allocate output reg!");
2110 if (!Constraints[i].isIndirectOutput) {
2111 assert(RetValRegs.Regs.empty() &&
2112 "Cannot have multiple output constraints yet!");
2113 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2114 RetValRegs = Regs;
2115 } else {
2116 IndirectStoresToEmit.push_back(std::make_pair(Regs,
2117 I.getOperand(OpNum)));
2118 OpNum++; // Consumes a call operand.
2121 // Add information to the INLINEASM node to know that this register is
2122 // set.
2123 Regs.AddInlineAsmOperands(2 /*REGDEF*/, DAG, AsmNodeOperands);
2126 case InlineAsm::isInput: {
2127 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2128 OpNum++; // Consumes a call operand.
2130 if (isdigit(ConstraintCode[0])) { // Matching constraint?
2131 // If this is required to match an output register we have already set,
2132 // just use its register.
2133 unsigned OperandNo = atoi(ConstraintCode.c_str());
2135 // Scan until we find the definition we already emitted of this operand.
2136 // When we find it, create a RegsForValue operand.
2137 unsigned CurOp = 2; // The first operand.
2138 for (; OperandNo; --OperandNo) {
2139 // Advance to the next operand.
2141 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2142 assert((NumOps & 7) == 2 /*REGDEF*/ &&
2143 "Skipped past definitions?");
2144 CurOp += (NumOps>>3)+1;
2148 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2149 assert((NumOps & 7) == 2 /*REGDEF*/ &&
2150 "Skipped past definitions?");
2152 // Add NumOps>>3 registers to MatchedRegs.
2153 RegsForValue MatchedRegs;
2154 MatchedRegs.ValueVT = InOperandVal.getValueType();
2155 MatchedRegs.RegVT = AsmNodeOperands[CurOp+1].getValueType();
2156 for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
2157 unsigned Reg=cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
2158 MatchedRegs.Regs.push_back(Reg);
2161 // Use the produced MatchedRegs object to copy the operand value into the registers.
2162 MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag);
2163 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
2167 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2168 if (ConstraintCode.size() == 1) // not a physreg name.
2169 CTy = TLI.getConstraintType(ConstraintCode[0]);
2171 if (CTy == TargetLowering::C_Other) {
2172 if (!TLI.isOperandValidForConstraint(InOperandVal, ConstraintCode[0]))
2173 assert(0 && "MATCH FAIL!");
2175 // Add information to the INLINEASM node to know about this input.
2176 unsigned ResOpType = 3 /*IMM*/ | (1 << 3);
2177 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2178 AsmNodeOperands.push_back(InOperandVal);
2180 } else if (CTy == TargetLowering::C_Memory) {
2183 // Check that the operand isn't a float.
2184 if (!MVT::isInteger(InOperandVal.getValueType()))
2185 assert(0 && "MATCH FAIL!");
2187 // Extend/truncate to the right pointer type if needed.
2188 MVT::ValueType PtrType = TLI.getPointerTy();
2189 if (InOperandVal.getValueType() < PtrType)
2190 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
2191 else if (InOperandVal.getValueType() > PtrType)
2192 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
2194 // Add information to the INLINEASM node to know about this input.
2195 unsigned ResOpType = 4/*MEM*/ | (1 << 3);
2196 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2197 AsmNodeOperands.push_back(InOperandVal);
2201 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
2203 // Copy the input into the appropriate registers.
2204 RegsForValue InRegs =
2205 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2206 false, true, OutputRegs, InputRegs);
2207 // FIXME: should be match fail.
2208 assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!");
2210 InRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag);
2212 InRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG, AsmNodeOperands);
2215 case InlineAsm::isClobber: {
2216 RegsForValue ClobberedRegs =
2217 GetRegistersForValue(ConstraintCode, MVT::Other, false, false,
2218 OutputRegs, InputRegs);
2219 // Add the clobbered value to the operand list, so that the register
2220 // allocator is aware that the physreg got clobbered.
2221 if (!ClobberedRegs.Regs.empty())
2222 ClobberedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG, AsmNodeOperands);
2228 // Finish up input operands.
2229 AsmNodeOperands[0] = Chain;
2230 if (Flag.Val) AsmNodeOperands.push_back(Flag);
2232 std::vector<MVT::ValueType> VTs;
2233 VTs.push_back(MVT::Other);
2234 VTs.push_back(MVT::Flag);
2235 Chain = DAG.getNode(ISD::INLINEASM, VTs, AsmNodeOperands);
2236 Flag = Chain.getValue(1);
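// Illustrative operand layout of the INLINEASM node built above (registers
// and asm string hypothetical):
//   Ops[0] = Chain
//   Ops[1] = AsmStr, e.g. "addl $1, $0"
//   Ops[2] = 2 | (1 << 3)  /* REGDEF, one reg */   Ops[3] = Register:R0
//   Ops[4] = 1 | (1 << 3)  /* REGUSE, one reg */   Ops[5] = Register:R1
// The node produces {Other, Flag} so copies out of the asm can be glued.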
2238 // If this asm returns a register value, copy the result from that register
2239 // and set it as the value of the call.
2240 if (!RetValRegs.Regs.empty())
2241 setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag));
2243 std::vector<std::pair<SDOperand, Value*> > StoresToEmit;
2245 // Process indirect outputs, first output all of the flagged copies out of
2246 // the physregs.
2247 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
2248 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
2249 Value *Ptr = IndirectStoresToEmit[i].second;
2250 SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag);
2251 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
2254 // Emit the non-flagged stores from the physregs.
2255 std::vector<SDOperand> OutChains;
2256 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
2257 OutChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
2258 StoresToEmit[i].first,
2259 getValue(StoresToEmit[i].second),
2260 DAG.getSrcValue(StoresToEmit[i].second)));
2261 if (!OutChains.empty())
2262 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains);
2267 void SelectionDAGLowering::visitMalloc(MallocInst &I) {
2268 SDOperand Src = getValue(I.getOperand(0));
2270 MVT::ValueType IntPtr = TLI.getPointerTy();
2272 if (IntPtr < Src.getValueType())
2273 Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
2274 else if (IntPtr > Src.getValueType())
2275 Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
2277 // Scale the source by the type size.
2278 uint64_t ElementSize = TD.getTypeSize(I.getType()->getElementType());
2279 Src = DAG.getNode(ISD::MUL, Src.getValueType(),
2280 Src, getIntPtrConstant(ElementSize));
2282 std::vector<std::pair<SDOperand, const Type*> > Args;
2283 Args.push_back(std::make_pair(Src, TLI.getTargetData().getIntPtrType()));
2285 std::pair<SDOperand,SDOperand> Result =
2286 TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
2287 DAG.getExternalSymbol("malloc", IntPtr),
2289 setValue(&I, Result.first); // Pointers always fit in registers
2290 DAG.setRoot(Result.second);
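// Worked example (illustrative): for the (old-syntax) instruction
//   %p = malloc int, uint %n
// Src == %n is extended or truncated to the pointer width, multiplied by
// sizeof(int) == 4, and passed as the single argument of a call to the
// external symbol "malloc"; the call's result becomes the pointer value.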
2293 void SelectionDAGLowering::visitFree(FreeInst &I) {
2294 std::vector<std::pair<SDOperand, const Type*> > Args;
2295 Args.push_back(std::make_pair(getValue(I.getOperand(0)),
2296 TLI.getTargetData().getIntPtrType()));
2297 MVT::ValueType IntPtr = TLI.getPointerTy();
2298 std::pair<SDOperand,SDOperand> Result =
2299 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
2300 DAG.getExternalSymbol("free", IntPtr), Args, DAG);
2301 DAG.setRoot(Result.second);
2304 // InsertAtEndOfBasicBlock - This method should be implemented by targets that
2305 // mark instructions with the 'usesCustomDAGSchedInserter' flag. These
2306 // instructions are special in various ways, which require special support to
2307 // insert. The specified MachineInstr is created but not inserted into any
2308 // basic blocks, and the scheduler passes ownership of it to this method.
2309 MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
2310 MachineBasicBlock *MBB) {
2311 std::cerr << "If a target marks an instruction with "
2312 "'usesCustomDAGSchedInserter', it must implement "
2313 "TargetLowering::InsertAtEndOfBasicBlock!\n";
2318 void SelectionDAGLowering::visitVAStart(CallInst &I) {
2319 DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(),
2320 getValue(I.getOperand(1)),
2321 DAG.getSrcValue(I.getOperand(1))));
2324 void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
2325 SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
2326 getValue(I.getOperand(0)),
2327 DAG.getSrcValue(I.getOperand(0)));
2329 DAG.setRoot(V.getValue(1));
2332 void SelectionDAGLowering::visitVAEnd(CallInst &I) {
2333 DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(),
2334 getValue(I.getOperand(1)),
2335 DAG.getSrcValue(I.getOperand(1))));
2338 void SelectionDAGLowering::visitVACopy(CallInst &I) {
2339 DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(),
2340 getValue(I.getOperand(1)),
2341 getValue(I.getOperand(2)),
2342 DAG.getSrcValue(I.getOperand(1)),
2343 DAG.getSrcValue(I.getOperand(2))));
2346 /// TargetLowering::LowerArguments - This is the default LowerArguments
2347 /// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
2348 /// targets are migrated to using FORMAL_ARGUMENTS, this hook should be removed.
2349 std::vector<SDOperand>
2350 TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
2351 // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
2352 std::vector<SDOperand> Ops;
2353 Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
2354 Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));
2356 // Add one result value for each formal argument.
2357 std::vector<MVT::ValueType> RetVals;
2358 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
2359 MVT::ValueType VT = getValueType(I->getType());
2361 switch (getTypeAction(VT)) {
2362 default: assert(0 && "Unknown type action!");
2364 RetVals.push_back(VT);
2367 RetVals.push_back(getTypeToTransformTo(VT));
2370 if (VT != MVT::Vector) {
2371 // If this is a large integer, it needs to be broken up into small
2372 // integers. Figure out what the destination type is and how many small
2373 // integers it turns into.
2374 MVT::ValueType NVT = getTypeToTransformTo(VT);
2375 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2376 for (unsigned i = 0; i != NumVals; ++i)
2377 RetVals.push_back(NVT);
2379 // Otherwise, this is a vector type. We only support legal vectors
2380 // right now.
2381 unsigned NumElems = cast<PackedType>(I->getType())->getNumElements();
2382 const Type *EltTy = cast<PackedType>(I->getType())->getElementType();
2384 // Figure out if there is a Packed type corresponding to this Vector
2385 // type. If so, convert to the packed type.
2386 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2387 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2388 RetVals.push_back(TVT);
2390 assert(0 && "Don't support illegal by-val vector arguments yet!");
2397 if (RetVals.size() == 0)
2398 RetVals.push_back(MVT::isVoid);
2401 SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, RetVals, Ops).Val;
2403 // Set up the return result vector.
2404 Ops.clear();
2405 unsigned i = 0;
2406 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
2407 MVT::ValueType VT = getValueType(I->getType());
2409 switch (getTypeAction(VT)) {
2410 default: assert(0 && "Unknown type action!");
2412 Ops.push_back(SDOperand(Result, i++));
2415 SDOperand Op(Result, i++);
2416 if (MVT::isInteger(VT)) {
2417 unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext
2419 Op = DAG.getNode(AssertOp, Op.getValueType(), Op, DAG.getValueType(VT));
2420 Op = DAG.getNode(ISD::TRUNCATE, VT, Op);
2422 assert(MVT::isFloatingPoint(VT) && "Not int or FP?");
2423 Op = DAG.getNode(ISD::FP_ROUND, VT, Op);
2429 if (VT != MVT::Vector) {
2430 // If this is a large integer, it needs to be reassembled from small
2431 // integers. Figure out what the source elt type is and how many small
2433 MVT::ValueType NVT = getTypeToTransformTo(VT);
2434 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2436 SDOperand Lo = SDOperand(Result, i++);
2437 SDOperand Hi = SDOperand(Result, i++);
2439 if (!isLittleEndian())
2440 std::swap(Lo, Hi);
2442 Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi));
2444 // Value scalarized into many values. Unimp for now.
2445 assert(0 && "Cannot expand i64 -> i16 yet!");
2448 // Otherwise, this is a vector type. We only support legal vectors
2449 // right now.
2450 unsigned NumElems = cast<PackedType>(I->getType())->getNumElements();
2451 const Type *EltTy = cast<PackedType>(I->getType())->getElementType();
2453 // Figure out if there is a Packed type corresponding to this Vector
2454 // type. If so, convert to the packed type.
2455 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2456 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2457 Ops.push_back(SDOperand(Result, i++));
2459 assert(0 && "Don't support illegal by-val vector arguments yet!");
2468 // It is always conservatively correct for llvm.returnaddress and
2469 // llvm.frameaddress to return 0.
2470 std::pair<SDOperand, SDOperand>
2471 TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
2472 unsigned Depth, SelectionDAG &DAG) {
2473 return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
2476 SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
2477 assert(0 && "LowerOperation not implemented for this target!");
2482 SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op,
2483 SelectionDAG &DAG) {
2484 assert(0 && "CustomPromoteOperation not implemented for this target!");
2489 void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
2490 unsigned Depth = (unsigned)cast<ConstantUInt>(I.getOperand(1))->getValue();
2491 std::pair<SDOperand,SDOperand> Result =
2492 TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
2493 setValue(&I, Result.first);
2494 DAG.setRoot(Result.second);
2497 /// getMemsetValue - Vectorized representation of the memset value
2498 /// operand.
2499 static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
2500 SelectionDAG &DAG) {
2501 MVT::ValueType CurVT = VT;
2502 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
2503 uint64_t Val = C->getValue() & 255;
2505 while (CurVT != MVT::i8) {
2506 Val = (Val << Shift) | Val;
2508 CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
2510 return DAG.getConstant(Val, VT);
2512 Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
2514 while (CurVT != MVT::i8) {
2516 DAG.getNode(ISD::OR, VT,
2517 DAG.getNode(ISD::SHL, VT, Value,
2518 DAG.getConstant(Shift, MVT::i8)), Value);
2520 CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
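// Worked example (illustrative): splatting the constant byte 0xAB to
// VT == MVT::i32 doubles the width each iteration,
//   0xAB -> 0xABAB -> 0xABABABAB
// and the non-constant path builds the same splat out of SHL/OR nodes
// instead.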
2527 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
2528 /// used when a memcpy is turned into a memset and the source is a constant
2529 /// string.
2530 static SDOperand getMemsetStringVal(MVT::ValueType VT,
2531 SelectionDAG &DAG, TargetLowering &TLI,
2532 std::string &Str, unsigned Offset) {
2533 MVT::ValueType CurVT = VT;
2534 uint64_t Val = 0;
2535 unsigned MSB = getSizeInBits(VT) / 8;
2536 if (TLI.isLittleEndian())
2537 Offset = Offset + MSB - 1;
2538 for (unsigned i = 0; i != MSB; ++i) {
2539 Val = (Val << 8) | Str[Offset];
2540 Offset += TLI.isLittleEndian() ? -1 : 1;
2542 return DAG.getConstant(Val, VT);
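// Worked example (illustrative): loading 4 bytes of the string "abcd" at
// Offset 0 into an i32 on a little-endian target visits the bytes in
// reverse ('d','c','b','a') and yields the constant 0x64636261, exactly
// the word a byte-by-byte copy would have stored in memory.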
2545 /// getMemBasePlusOffset - Returns base and offset node for the memory access.
2546 static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
2547 SelectionDAG &DAG, TargetLowering &TLI) {
2548 MVT::ValueType VT = Base.getValueType();
2549 return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
2552 /// MeetsMaxMemopRequirement - Determines if the number of memory ops required
2553 /// to replace the memset / memcpy is below the threshold. It also returns the
2554 /// types of the sequence of memory ops to perform memset / memcpy.
2555 static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
2556 unsigned Limit, uint64_t Size,
2557 unsigned Align, TargetLowering &TLI) {
2560 if (TLI.allowsUnalignedMemoryAccesses()) {
2563 switch (Align & 7) {
2579 MVT::ValueType LVT = MVT::i64;
2580 while (!TLI.isTypeLegal(LVT))
2581 LVT = (MVT::ValueType)((unsigned)LVT - 1);
2582 assert(MVT::isInteger(LVT));
2587 unsigned NumMemOps = 0;
2589 unsigned VTSize = getSizeInBits(VT) / 8;
2590 while (VTSize > Size) {
2591 VT = (MVT::ValueType)((unsigned)VT - 1);
2594 assert(MVT::isInteger(VT));
2596 if (++NumMemOps > Limit)
2598 MemOps.push_back(VT);
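// Worked example (illustrative, assuming the Align switch picked
// VT == MVT::i32 on a 32-bit target): an 11-byte operation is covered
// greedily as
//   i32, i32, i16, i8   (4 + 4 + 2 + 1 == 11, NumMemOps == 4)
// and succeeds as long as 4 does not exceed Limit.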
2605 void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
2606 SDOperand Op1 = getValue(I.getOperand(1));
2607 SDOperand Op2 = getValue(I.getOperand(2));
2608 SDOperand Op3 = getValue(I.getOperand(3));
2609 SDOperand Op4 = getValue(I.getOperand(4));
2610 unsigned Align = (unsigned)cast<ConstantSDNode>(Op4)->getValue();
2611 if (Align == 0) Align = 1;
2613 if (ConstantSDNode *Size = dyn_cast<ConstantSDNode>(Op3)) {
2614 std::vector<MVT::ValueType> MemOps;
2616 // Expand memset / memcpy to a series of load / store ops
2617 // if the size operand falls below a certain threshold.
2618 std::vector<SDOperand> OutChains;
2620 default: break; // Do nothing for now.
2622 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(),
2623 Size->getValue(), Align, TLI)) {
2624 unsigned NumMemOps = MemOps.size();
2625 unsigned Offset = 0;
2626 for (unsigned i = 0; i < NumMemOps; i++) {
2627 MVT::ValueType VT = MemOps[i];
2628 unsigned VTSize = getSizeInBits(VT) / 8;
2629 SDOperand Value = getMemsetValue(Op2, VT, DAG);
2630 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, getRoot(),
2632 getMemBasePlusOffset(Op1, Offset, DAG, TLI),
2633 DAG.getSrcValue(I.getOperand(1), Offset));
2634 OutChains.push_back(Store);
2641 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(),
2642 Size->getValue(), Align, TLI)) {
2643 unsigned NumMemOps = MemOps.size();
2644 unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0;
2645 GlobalAddressSDNode *G = NULL;
2647 bool CopyFromStr = false;
2649 if (Op2.getOpcode() == ISD::GlobalAddress)
2650 G = cast<GlobalAddressSDNode>(Op2);
2651 else if (Op2.getOpcode() == ISD::ADD &&
2652 Op2.getOperand(0).getOpcode() == ISD::GlobalAddress &&
2653 Op2.getOperand(1).getOpcode() == ISD::Constant) {
2654 G = cast<GlobalAddressSDNode>(Op2.getOperand(0));
2655 SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue();
2658 GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
2660 Str = GV->getStringValue(false);
2668 for (unsigned i = 0; i < NumMemOps; i++) {
2669 MVT::ValueType VT = MemOps[i];
2670 unsigned VTSize = getSizeInBits(VT) / 8;
2671 SDOperand Value, Chain, Store;
2674 Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff);
2677 DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
2678 getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
2679 DAG.getSrcValue(I.getOperand(1), DstOff));
2681 Value = DAG.getLoad(VT, getRoot(),
2682 getMemBasePlusOffset(Op2, SrcOff, DAG, TLI),
2683 DAG.getSrcValue(I.getOperand(2), SrcOff));
2684 Chain = Value.getValue(1);
2686 DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
2687 getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
2688 DAG.getSrcValue(I.getOperand(1), DstOff));
2690 OutChains.push_back(Store);
2699 if (!OutChains.empty()) {
2700 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains));
2705 std::vector<SDOperand> Ops;
2706 Ops.push_back(getRoot());
2711 DAG.setRoot(DAG.getNode(Op, MVT::Other, Ops));
2714 //===----------------------------------------------------------------------===//
2715 // SelectionDAGISel code
2716 //===----------------------------------------------------------------------===//
2718 unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
2719 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
2722 void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
2723 // FIXME: we only modify the CFG to split critical edges. This
2724 // updates dom and loop info.
2728 /// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
2729 /// casting to the type of GEPI.
2730 static Value *InsertGEPComputeCode(Value *&V, BasicBlock *BB, Instruction *GEPI,
2731 Value *Ptr, Value *PtrOffset) {
2732 if (V) return V; // Already computed.
2734 BasicBlock::iterator InsertPt;
2735 if (BB == GEPI->getParent()) {
2736 // If insert into the GEP's block, insert right after the GEP.
2740 // Otherwise, insert at the top of BB, after any PHI nodes
2741 InsertPt = BB->begin();
2742 while (isa<PHINode>(InsertPt)) ++InsertPt;
2745 // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
2746 // BB so that there is only one value live across basic blocks (the cast
2747 // operand).
2748 if (CastInst *CI = dyn_cast<CastInst>(Ptr))
2749 if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
2750 Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
2752 // Add the offset, cast it to the right type.
2753 Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
2754 Ptr = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
2759 /// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
2760 /// selection, we want to be a bit careful about some things. In particular, if
2761 /// we have a GEP instruction that is used in a different block than it is
2762 /// defined, the addressing expression of the GEP cannot be folded into loads or
2763 /// stores that use it. In this case, decompose the GEP and move constant
2764 /// indices into blocks that use it.
2765 static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
2766 const TargetData &TD) {
2767 // If this GEP is only used inside the block it is defined in, there is no
2768 // need to rewrite it.
2769 bool isUsedOutsideDefBB = false;
2770 BasicBlock *DefBB = GEPI->getParent();
2771 for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
2773 if (cast<Instruction>(*UI)->getParent() != DefBB) {
2774 isUsedOutsideDefBB = true;
2778 if (!isUsedOutsideDefBB) return;
2780 // If this GEP has no non-zero constant indices, there is nothing we can do,
2781 // ignore it.
2782 bool hasConstantIndex = false;
2783 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
2784 E = GEPI->op_end(); OI != E; ++OI) {
2785 if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI))
2786 if (CI->getRawValue()) {
2787 hasConstantIndex = true;
2791 // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
2792 if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0))) return;
2794 // Otherwise, decompose the GEP instruction into multiplies and adds. Sum the
2795 // constant offset (which we now know is non-zero) and deal with it later.
2796 uint64_t ConstantOffset = 0;
2797 const Type *UIntPtrTy = TD.getIntPtrType();
2798 Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
2799 const Type *Ty = GEPI->getOperand(0)->getType();
2801 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
2802 E = GEPI->op_end(); OI != E; ++OI) {
2803 Value *Idx = *OI;
2804 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2805 unsigned Field = cast<ConstantUInt>(Idx)->getValue();
2807 ConstantOffset += TD.getStructLayout(StTy)->MemberOffsets[Field];
2808 Ty = StTy->getElementType(Field);
2810 Ty = cast<SequentialType>(Ty)->getElementType();
2812 // Handle constant subscripts.
2813 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2814 if (CI->getRawValue() == 0) continue;
2816 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
2817 ConstantOffset += (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
2819 ConstantOffset+=TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
2823 // Ptr = Ptr + Idx * ElementSize;
2825 // Cast Idx to UIntPtrTy if needed.
2826 Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);
2828 uint64_t ElementSize = TD.getTypeSize(Ty);
2829 // Mask off bits that should not be set.
2830 ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
2831 Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize);
2833 // Multiply by the element size and add to the base.
2834 Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
2835 Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
2839 // Make sure that the offset fits in uintptr_t.
2840 ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
2841 Constant *PtrOffset = ConstantUInt::get(UIntPtrTy, ConstantOffset);
2843 // Okay, we have now emitted all of the variable index parts to the BB that
2844 // the GEP is defined in. Loop over all of the using instructions, inserting
2845 // an "add Ptr, ConstantOffset" into each block that uses it and update the
2846 // instruction to use the newly computed value, making GEPI dead. When the
2847 // user is a load or store instruction address, we emit the add into the user
2848 // block, otherwise we use a canonical version right next to the gep (these
2849 // won't be foldable as addresses, so we might as well share the computation).
2851 std::map<BasicBlock*,Value*> InsertedExprs;
2852 while (!GEPI->use_empty()) {
2853 Instruction *User = cast<Instruction>(GEPI->use_back());
2855 // If this use is not foldable into the addressing mode, use a version
2856 // emitted in the GEP block.
2858 if (!isa<LoadInst>(User) &&
2859 (!isa<StoreInst>(User) || User->getOperand(0) == GEPI)) {
2860 NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
2863 // Otherwise, insert the code in the User's block so it can be folded into
2864 // any users in that block.
2865 NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
2866 User->getParent(), GEPI,
2869 User->replaceUsesOfWith(GEPI, NewVal);
2872 // Finally, the GEP is dead, remove it.
2873 GEPI->eraseFromParent();
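// Illustrative sketch (hypothetical IR, old syntax): given
//   %t = getelementptr %struct.S* %p, int 0, uint 1   ; defined in BB1
// with a load of %t in BB2, the variable index parts (none here) stay in
// BB1 and an "add <ptr>, <offset-of-field-1>" is inserted right before
// the load in BB2, where it can fold into the load's addressing mode.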
2876 bool SelectionDAGISel::runOnFunction(Function &Fn) {
2877 MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
2878 RegMap = MF.getSSARegMap();
2879 DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");
2881 // First, split all critical edges for PHI nodes with incoming values that are
2882 // constants; this way the load of the constant into a vreg will not be placed
2883 // into MBBs that are used some other way.
2885 // In this pass we also look for GEP instructions that are used across basic
2886 // blocks and rewrite them to improve basic-block-at-a-time selection.
2888 for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
2890 BasicBlock::iterator BBI;
2891 for (BBI = BB->begin(); (PN = dyn_cast<PHINode>(BBI)); ++BBI)
2892 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
2893 if (isa<Constant>(PN->getIncomingValue(i)))
2894 SplitCriticalEdge(PN->getIncomingBlock(i), BB);
2896 for (BasicBlock::iterator E = BB->end(); BBI != E; )
2897 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(BBI++))
2898 OptimizeGEPExpression(GEPI, TLI.getTargetData());
2901 FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
2903 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
2904 SelectBasicBlock(I, MF, FuncInfo);
2910 SDOperand SelectionDAGISel::
2911 CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) {
2912 SDOperand Op = SDL.getValue(V);
2913 assert((Op.getOpcode() != ISD::CopyFromReg ||
2914 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
2915 "Copy from a reg to the same reg!");
2917 // If this type is not legal, we must make sure to not create an invalid
2918 // register use.
2919 MVT::ValueType SrcVT = Op.getValueType();
2920 MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
2921 SelectionDAG &DAG = SDL.DAG;
2922 if (SrcVT == DestVT) {
2923 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
2924 } else if (SrcVT == MVT::Vector) {
2925 // Handle copies from generic vectors to registers.
2926 MVT::ValueType PTyElementVT, PTyLegalElementVT;
2927 unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(V->getType()),
2928 PTyElementVT, PTyLegalElementVT);
2930 // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT"
2931 // MVT::Vector type.
2932 Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op,
2933 DAG.getConstant(NE, MVT::i32),
2934 DAG.getValueType(PTyElementVT));
2936 // Loop over all of the elements of the resultant vector,
2937 // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then
2938 // copying them into output registers.
2939 std::vector<SDOperand> OutChains;
2940 SDOperand Root = SDL.getRoot();
2941 for (unsigned i = 0; i != NE; ++i) {
2942 SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT,
2943 Op, DAG.getConstant(i, MVT::i32));
2944 if (PTyElementVT == PTyLegalElementVT) {
2945 // Elements are legal.
2946 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
2947 } else if (PTyLegalElementVT > PTyElementVT) {
2948 // Elements are promoted.
2949 if (MVT::isFloatingPoint(PTyLegalElementVT))
2950 Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt);
2952 Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt);
2953 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
2955 // Elements are expanded.
2956 // The src value is expanded into multiple registers.
2957 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
2958 Elt, DAG.getConstant(0, MVT::i32));
2959 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
2960 Elt, DAG.getConstant(1, MVT::i32));
2961 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo));
2962 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi));
2965 return DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains);
2966 } else if (SrcVT < DestVT) {
2967 // The src value is promoted to the register.
2968 if (MVT::isFloatingPoint(SrcVT))
2969 Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
2971 Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
2972 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
2974 // The src value is expanded into multiple registers.
2975 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
2976 Op, DAG.getConstant(0, MVT::i32));
2977 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
2978 Op, DAG.getConstant(1, MVT::i32));
2979 Op = DAG.getCopyToReg(SDL.getRoot(), Reg, Lo);
2980 return DAG.getCopyToReg(Op, Reg+1, Hi);
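// Illustrative example of the MVT::Vector path above (assuming a 32-bit
// target where i64 elements are illegal): copying a <2 x long> value first
// VBIT_CONVERTs it to two i64 elements, and each extracted element is then
// split with EXTRACT_ELEMENT into two i32 halves, consuming four
// consecutive virtual registers in total.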
2984 void SelectionDAGISel::
2985 LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
2986 std::vector<SDOperand> &UnorderedChains) {
2987 // If this is the entry block, emit arguments.
2988 Function &F = *BB->getParent();
2989 FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
2990 SDOperand OldRoot = SDL.DAG.getRoot();
2991 std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
2994 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
2996 if (!AI->use_empty()) {
2997 SDL.setValue(AI, Args[a]);
2999 MVT::ValueType VT = TLI.getValueType(AI->getType());
3000 if (VT == MVT::Vector) {
3001 // Insert a VBIT_CONVERT between the FORMAL_ARGUMENT node and its uses;
3002 // otherwise the legalizer will balk.
3003 BasicBlock::iterator InsertPt = BB->begin();
3004 Value *NewVal = new CastInst(AI, AI->getType(), AI->getName(), InsertPt);
3005 for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
3007 Instruction *User = cast<Instruction>(*UI);
3009 User->replaceUsesOfWith(AI, NewVal);
3012 // If this argument is live outside of the entry block, insert a copy from
3013 // wherever we got it to the vreg that other BBs will reference it as.
3014 if (FuncInfo.ValueMap.count(AI)) {
3015 SDOperand Copy =
3016 CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
3017 UnorderedChains.push_back(Copy);
3021 // Next, if the function has live ins that need to be copied into vregs,
3022 // emit the copies now, into the top of the block.
3023 MachineFunction &MF = SDL.DAG.getMachineFunction();
3024 if (MF.livein_begin() != MF.livein_end()) {
3025 SSARegMap *RegMap = MF.getSSARegMap();
3026 const MRegisterInfo &MRI = *MF.getTarget().getRegisterInfo();
3027 for (MachineFunction::livein_iterator LI = MF.livein_begin(),
3028 E = MF.livein_end(); LI != E; ++LI)
3030 MRI.copyRegToReg(*MF.begin(), MF.begin()->end(), LI->second,
3031 LI->first, RegMap->getRegClass(LI->second));
3034 // Finally, if the target has anything special to do, allow it to do so.
3035 EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction());
3039 void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
3040 std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
3041 FunctionLoweringInfo &FuncInfo) {
3042 SelectionDAGLowering SDL(DAG, TLI, FuncInfo);
3044 std::vector<SDOperand> UnorderedChains;
3046 // Lower any arguments needed in this block if this is the entry block.
3047 if (LLVMBB == &LLVMBB->getParent()->front())
3048 LowerArguments(LLVMBB, SDL, UnorderedChains);
3050 BB = FuncInfo.MBBMap[LLVMBB];
3051 SDL.setCurrentBasicBlock(BB);
3053 // Lower all of the non-terminator instructions.
3054 for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
3058 // Ensure that all instructions which are used outside of their defining
3059 // blocks are available as virtual registers.
3060 for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
3061 if (!I->use_empty() && !isa<PHINode>(I)) {
3062 std::map<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I);
3063 if (VMI != FuncInfo.ValueMap.end())
3064 UnorderedChains.push_back(
3065 CopyValueToVirtualRegister(SDL, I, VMI->second));
3068 // Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
3069 // ensure constants are generated when needed. Remember the virtual registers
3070 // that need to be added to the Machine PHI nodes as input. We cannot just
3071 // directly add them, because expansion might result in multiple MBB's for one
3072 // BB. As such, the start of the BB might correspond to a different MBB than
3073 // the end of the BB.
3076 // Emit constants only once even if used by multiple PHI nodes.
3077 std::map<Constant*, unsigned> ConstantsOut;
3079 // Check successor nodes' PHI nodes that expect a constant to be available
3080 // from the current block.
3081 TerminatorInst *TI = LLVMBB->getTerminator();
3082 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
3083 BasicBlock *SuccBB = TI->getSuccessor(succ);
3084 MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();
3087 // At this point we know that there is a 1-1 correspondence between LLVM PHI
3088 // nodes and Machine PHI nodes, but the incoming operands have not been
3089 // emitted yet.
3090 for (BasicBlock::iterator I = SuccBB->begin();
3091 (PN = dyn_cast<PHINode>(I)); ++I)
3092 if (!PN->use_empty()) {
3094 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
3095 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
3096 unsigned &RegOut = ConstantsOut[C];
3098 RegOut = FuncInfo.CreateRegForValue(C);
3099 UnorderedChains.push_back(
3100 CopyValueToVirtualRegister(SDL, C, RegOut));
3104 Reg = FuncInfo.ValueMap[PHIOp];
3106 assert(isa<AllocaInst>(PHIOp) &&
3107 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
3108 "Didn't codegen value into a register!??");
3109 Reg = FuncInfo.CreateRegForValue(PHIOp);
3110 UnorderedChains.push_back(
3111 CopyValueToVirtualRegister(SDL, PHIOp, Reg));
3115 // Remember that this register needs to be added to the machine PHI node as
3116 // the input for this MBB.
3117 MVT::ValueType VT = TLI.getValueType(PN->getType());
3118 unsigned NumElements;
3119 if (VT != MVT::Vector)
3120 NumElements = TLI.getNumElements(VT);
3122 MVT::ValueType VT1,VT2;
3124 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
3127 for (unsigned i = 0, e = NumElements; i != e; ++i)
3128 PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
3131 ConstantsOut.clear();
3133 // Turn all of the unordered chains into one factored node.
3134 if (!UnorderedChains.empty()) {
3135 SDOperand Root = SDL.getRoot();
3136 if (Root.getOpcode() != ISD::EntryToken) {
3137 unsigned i = 0, e = UnorderedChains.size();
3138 for (; i != e; ++i) {
3139 assert(UnorderedChains[i].Val->getNumOperands() > 1);
3140 if (UnorderedChains[i].Val->getOperand(0) == Root)
3141 break; // Don't add the root if we already indirectly depend on it.
3145 UnorderedChains.push_back(Root);
3147 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, UnorderedChains));
3150 // Lower the terminator after the copies are emitted.
3151 SDL.visit(*LLVMBB->getTerminator());
3153 // Copy over any CaseBlock records that may now exist due to SwitchInst
3154 // lowering, as well as any jump table information.
3155 SwitchCases.clear();
3156 SwitchCases = SDL.SwitchCases;
3159 // Make sure the root of the DAG is up-to-date.
3160 DAG.setRoot(SDL.getRoot());
3163 void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) {
3164 // Run the DAG combiner in pre-legalize mode.
3167 DEBUG(std::cerr << "Lowered selection DAG:\n");
3170 // Second step, hack on the DAG until it only uses operations and types that
3171 // the target supports.
3174 DEBUG(std::cerr << "Legalized selection DAG:\n");
3177 // Run the DAG combiner in post-legalize mode.
3180 if (ViewISelDAGs) DAG.viewGraph();
3183 FoldNodeInFlight = !NoFoldNodeInFlight;
3185 // Third, instruction select all of the operations to machine code, adding the
3186 // code to the MachineBasicBlock.
3187 InstructionSelectBasicBlock(DAG);
3189 DEBUG(std::cerr << "Selected machine code:\n");
3193 void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
3194 FunctionLoweringInfo &FuncInfo) {
3195 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
3197 SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
3200 // First step, lower LLVM code to some DAG. This DAG may use operations and
3201 // types that are not supported by the target.
3202 BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
3204 // Second step, emit the lowered DAG as machine code.
3205 CodeGenAndEmitDAG(DAG);
3208 // Next, now that we know which MBB the LLVM BB expanded into last, update
3209 // PHI nodes in successors.
3210 if (SwitchCases.empty() && JT.Reg == 0) {
3211 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
3212 MachineInstr *PHI = PHINodesToUpdate[i].first;
3213 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
3214 "This is not a machine PHI node that we are updating!");
3215 PHI->addRegOperand(PHINodesToUpdate[i].second);
3216 PHI->addMachineBasicBlockOperand(BB);
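
  // Machine PHI operands come in (register, predecessor MBB) pairs, e.g.
  //   %reg10 = PHI %reg11, <mbb1>, %reg12, <mbb2>
  // so each update above appends one such pair naming the block just emitted.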

  // If the JumpTable record is filled in, then we need to emit a jump table.
  // Updating the PHI nodes is tricky in this case, since we need to determine
  // whether the PHI is a successor of the range check MBB or of the jump
  // table MBB.
  if (JT.Reg) {
    assert(SwitchCases.empty() && "Cannot have jump table and lowered switch");
    SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
    CurDAG = &SDAG;
    SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
    MachineBasicBlock *RangeBB = BB;
    // Set the current basic block to the mbb we wish to insert the code into.
    BB = JT.MBB;
    SDL.setCurrentBasicBlock(BB);
    // Emit the jump table code.
    SDL.visitJumpTable(JT);
    SDAG.setRoot(SDL.getRoot());
    CodeGenAndEmitDAG(SDAG);
    // Update the PHI nodes.
    for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
      MachineInstr *PHI = PHINodesToUpdate[pi].first;
      MachineBasicBlock *PHIBB = PHI->getParent();
      assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
             "This is not a machine PHI node that we are updating!");
      if (PHIBB == JT.Default || JT.SuccMBBs.find(PHIBB) != JT.SuccMBBs.end()) {
        PHIBB = (PHIBB == JT.Default) ? RangeBB : BB;
        PHI->addRegOperand(PHINodesToUpdate[pi].second);
        PHI->addMachineBasicBlockOperand(PHIBB);
      }
    }
    return;
  }
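
  // Sketch of the lowered CFG: RangeBB performs the bounds check and branches
  // either to JT.Default or into the jump table block (now BB), so PHIs in
  // the default block list RangeBB as their predecessor, while PHIs in the
  // table's successors list BB.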

  // If we generated any switch lowering information, build and codegen any
  // additional DAGs necessary.
  for (unsigned i = 0, e = SwitchCases.size(); i != e; ++i) {
    SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
    CurDAG = &SDAG;
    SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
    // Set the current basic block to the mbb we wish to insert the code into.
    BB = SwitchCases[i].ThisBB;
    SDL.setCurrentBasicBlock(BB);
    // Emit the comparison code.
    SDL.visitSwitchCase(SwitchCases[i]);
    SDAG.setRoot(SDL.getRoot());
    CodeGenAndEmitDAG(SDAG);

    // Iterate over the phi nodes; if there is a phi node in a successor of
    // this block (for instance, the default block), then add a pair of
    // operands to the phi node for this block, as if we were coming from the
    // original BB before switch expansion.
    for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
      MachineInstr *PHI = PHINodesToUpdate[pi].first;
      MachineBasicBlock *PHIBB = PHI->getParent();
      assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
             "This is not a machine PHI node that we are updating!");
      if (PHIBB == SwitchCases[i].LHSBB || PHIBB == SwitchCases[i].RHSBB) {
        PHI->addRegOperand(PHINodesToUpdate[pi].second);
        PHI->addMachineBasicBlockOperand(BB);
      }
    }
  }
}

//===----------------------------------------------------------------------===//
/// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
/// target node in the graph.
void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) {
  if (ViewSchedDAGs) DAG.viewGraph();
  ScheduleDAG *SL = NULL;

  switch (ISHeuristic) {
  default: assert(0 && "Unrecognized scheduling heuristic");
  case defaultScheduling:
    if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency)
      SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer());
    else {
      assert(TLI.getSchedulingPreference() ==
             TargetLowering::SchedulingForRegPressure && "Unknown sched type!");
      SL = createBURRListDAGScheduler(DAG, BB);
    }
    break;
  case noScheduling:
    SL = createBFS_DAGScheduler(DAG, BB);
    break;
  case simpleScheduling:
    SL = createSimpleDAGScheduler(false, DAG, BB);
    break;
  case simpleNoItinScheduling:
    SL = createSimpleDAGScheduler(true, DAG, BB);
    break;
  case listSchedulingBURR:
    SL = createBURRListDAGScheduler(DAG, BB);
    break;
  case listSchedulingTD:
    SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer());
    break;
  }
  SL->Run();
  delete SL;
}
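
// Heuristic notes: bottom-up register-reduction (BURR) list scheduling tries
// to shorten live ranges, which is why it is the choice for targets that
// schedule for register pressure; the top-down list scheduler consults a
// HazardRecognizer (below) to avoid pipeline stalls, making it the default
// for SchedulingForLatency targets.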

/// CreateTargetHazardRecognizer - Return a newly-allocated hazard recognizer
/// for the scheduler to use.  This base implementation reports no hazards;
/// targets that model pipeline hazards override it.
HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() {
  return new HazardRecognizer();
}

/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
/// by tblgen.  Others should not call it.
void SelectionDAGISel::
SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) {
  std::vector<SDOperand> InOps;
  std::swap(InOps, Ops);

  Ops.push_back(InOps[0]);  // input chain.
  Ops.push_back(InOps[1]);  // input asm string.

  const char *AsmStr = cast<ExternalSymbolSDNode>(InOps[1])->getSymbol();
  unsigned i = 2, e = InOps.size();
  if (InOps[e-1].getValueType() == MVT::Flag)
    --e;  // Don't process a flag operand if it is here.
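
  // Each operand group is preceded by a constant "flags" word: the low three
  // bits encode the operand kind (4 means a memory operand) and the remaining
  // bits give the number of SDOperands in the group.  For example, a flags
  // value of (4 | (1 << 3)) announces exactly one memory operand to follow.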

  while (i != e) {
    unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue();
    if ((Flags & 7) != 4 /*MEM*/) {
      // Just skip over this operand, copying the operands verbatim.
      Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1);
      i += (Flags >> 3) + 1;
    } else {
      assert((Flags >> 3) == 1 && "Memory operand with multiple values?");
      // Otherwise, this is a memory operand.  Ask the target to select it.
      std::vector<SDOperand> SelOps;
      if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) {
        std::cerr << "Could not match memory address.  Inline asm failure!\n";
        exit(1);
      }

      // Add this to the output node.
      Ops.push_back(DAG.getConstant(4/*MEM*/ | (SelOps.size() << 3), MVT::i32));
      Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
      i += 2;
    }
  }

  // Add the flag input back if present.
  if (e != InOps.size())
    Ops.push_back(InOps.back());
}
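
// Note the convention above: SelectInlineAsmMemoryOperand returns true on
// failure, so a target override that matches the address appends its
// selected operands to SelOps and returns false.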