1 //===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements the SelectionDAGISel class.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "isel"
15 #include "llvm/CodeGen/SelectionDAGISel.h"
16 #include "llvm/CodeGen/ScheduleDAG.h"
17 #include "llvm/CallingConv.h"
18 #include "llvm/Constants.h"
19 #include "llvm/DerivedTypes.h"
20 #include "llvm/Function.h"
21 #include "llvm/GlobalVariable.h"
22 #include "llvm/InlineAsm.h"
23 #include "llvm/Instructions.h"
24 #include "llvm/Intrinsics.h"
25 #include "llvm/IntrinsicInst.h"
26 #include "llvm/CodeGen/IntrinsicLowering.h"
27 #include "llvm/CodeGen/MachineDebugInfo.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineFrameInfo.h"
30 #include "llvm/CodeGen/MachineInstrBuilder.h"
31 #include "llvm/CodeGen/SelectionDAG.h"
32 #include "llvm/CodeGen/SSARegMap.h"
33 #include "llvm/Target/MRegisterInfo.h"
34 #include "llvm/Target/TargetData.h"
35 #include "llvm/Target/TargetFrameInfo.h"
36 #include "llvm/Target/TargetInstrInfo.h"
37 #include "llvm/Target/TargetLowering.h"
38 #include "llvm/Target/TargetMachine.h"
39 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
40 #include "llvm/Support/CommandLine.h"
41 #include "llvm/Support/MathExtras.h"
42 #include "llvm/Support/Debug.h"
51 ViewISelDAGs("view-isel-dags", cl::Hidden,
52 cl::desc("Pop up a window to show isel dags as they are selected"));
54 ViewSchedDAGs("view-sched-dags", cl::Hidden,
55 cl::desc("Pop up a window to show sched dags as they are processed"));
57 static const bool ViewISelDAGs = 0, ViewSchedDAGs = 0;
60 // Scheduling heuristics
61 enum SchedHeuristics {
62 defaultScheduling, // Let the target specify its preference.
63 noScheduling, // No scheduling, emit breadth first sequence.
64 simpleScheduling, // Two pass, min. critical path, max. utilization.
65 simpleNoItinScheduling, // Same as above exact using generic latency.
66 listSchedulingBURR, // Bottom up reg reduction list scheduling.
67 listSchedulingTD // Top-down list scheduler.
71 cl::opt<SchedHeuristics>
74 cl::desc("Choose scheduling style"),
75 cl::init(defaultScheduling),
77 clEnumValN(defaultScheduling, "default",
78 "Target preferred scheduling style"),
79 clEnumValN(noScheduling, "none",
80 "No scheduling: breadth first sequencing"),
81 clEnumValN(simpleScheduling, "simple",
82 "Simple two pass scheduling: minimize critical path "
83 "and maximize processor utilization"),
84 clEnumValN(simpleNoItinScheduling, "simple-noitin",
85 "Simple two pass scheduling: Same as simple "
86 "except using generic latency"),
87 clEnumValN(listSchedulingBURR, "list-burr",
88 "Bottom up register reduction list scheduling"),
89 clEnumValN(listSchedulingTD, "list-td",
90 "Top-down list scheduler"),
95 /// RegsForValue - This struct represents the physical registers that a
96 /// particular value is assigned and the type information about the value.
97 /// This is needed because values can be promoted into larger registers and
98 /// expanded into multiple smaller registers than the value.
100 /// Regs - This list hold the register (for legal and promoted values)
101 /// or register set (for expanded values) that the value should be assigned
103 std::vector<unsigned> Regs;
105 /// RegVT - The value type of each register.
107 MVT::ValueType RegVT;
109 /// ValueVT - The value type of the LLVM value, which may be promoted from
110 /// RegVT or made from merging the two expanded parts.
111 MVT::ValueType ValueVT;
113 RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {}
115 RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt)
116 : RegVT(regvt), ValueVT(valuevt) {
119 RegsForValue(const std::vector<unsigned> ®s,
120 MVT::ValueType regvt, MVT::ValueType valuevt)
121 : Regs(regs), RegVT(regvt), ValueVT(valuevt) {
124 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
125 /// this value and returns the result as a ValueVT value. This uses
126 /// Chain/Flag as the input and updates them for the output Chain/Flag.
127 SDOperand getCopyFromRegs(SelectionDAG &DAG,
128 SDOperand &Chain, SDOperand &Flag) const;
130 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
131 /// specified value into the registers specified by this object. This uses
132 /// Chain/Flag as the input and updates them for the output Chain/Flag.
133 void getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
134 SDOperand &Chain, SDOperand &Flag) const;
136 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
137 /// operand list. This adds the code marker and includes the number of
138 /// values added into it.
139 void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
140 std::vector<SDOperand> &Ops) const;
145 //===--------------------------------------------------------------------===//
146 /// FunctionLoweringInfo - This contains information that is global to a
147 /// function that is used when lowering a region of the function.
148 class FunctionLoweringInfo {
155 FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,MachineFunction &MF);
157 /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
158 std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;
160 /// ValueMap - Since we emit code for the function a basic block at a time,
161 /// we must remember which virtual registers hold the values for
162 /// cross-basic-block values.
163 std::map<const Value*, unsigned> ValueMap;
165 /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
166 /// the entry block. This allows the allocas to be efficiently referenced
167 /// anywhere in the function.
168 std::map<const AllocaInst*, int> StaticAllocaMap;
170 unsigned MakeReg(MVT::ValueType VT) {
171 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
174 unsigned CreateRegForValue(const Value *V);
176 unsigned InitializeRegForValue(const Value *V) {
177 unsigned &R = ValueMap[V];
178 assert(R == 0 && "Already initialized this value register!");
179 return R = CreateRegForValue(V);
184 /// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
185 /// PHI nodes or outside of the basic block that defines it, or used by a
186 /// switch instruction, which may expand to multiple basic blocks.
187 static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
188 if (isa<PHINode>(I)) return true;
189 BasicBlock *BB = I->getParent();
190 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
191 if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
192 isa<SwitchInst>(*UI))
197 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
198 /// entry block, return true. This includes arguments used by switches, since
199 /// the switch may expand into multiple basic blocks.
200 static bool isOnlyUsedInEntryBlock(Argument *A) {
201 BasicBlock *Entry = A->getParent()->begin();
202 for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
203 if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
204 return false; // Use not in entry block.
208 FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
209 Function &fn, MachineFunction &mf)
210 : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {
212 // Create a vreg for each argument register that is not dead and is used
213 // outside of the entry block for the function.
214 for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
216 if (!isOnlyUsedInEntryBlock(AI))
217 InitializeRegForValue(AI);
219 // Initialize the mapping of values to registers. This is only set up for
220 // instruction values that are used outside of the block that defines
222 Function::iterator BB = Fn.begin(), EB = Fn.end();
223 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
224 if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
225 if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
226 const Type *Ty = AI->getAllocatedType();
227 uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
229 std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
232 // If the alignment of the value is smaller than the size of the value,
233 // and if the size of the value is particularly small (<= 8 bytes),
234 // round up to the size of the value for potentially better performance.
236 // FIXME: This could be made better with a preferred alignment hook in
237 // TargetData. It serves primarily to 8-byte align doubles for X86.
238 if (Align < TySize && TySize <= 8) Align = TySize;
239 TySize *= CUI->getValue(); // Get total allocated size.
240 if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
241 StaticAllocaMap[AI] =
242 MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
245 for (; BB != EB; ++BB)
246 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
247 if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
248 if (!isa<AllocaInst>(I) ||
249 !StaticAllocaMap.count(cast<AllocaInst>(I)))
250 InitializeRegForValue(I);
252 // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
253 // also creates the initial PHI MachineInstrs, though none of the input
254 // operands are populated.
255 for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
256 MachineBasicBlock *MBB = new MachineBasicBlock(BB);
258 MF.getBasicBlockList().push_back(MBB);
260 // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
263 for (BasicBlock::iterator I = BB->begin();
264 (PN = dyn_cast<PHINode>(I)); ++I)
265 if (!PN->use_empty()) {
266 MVT::ValueType VT = TLI.getValueType(PN->getType());
267 unsigned NumElements;
268 if (VT != MVT::Vector)
269 NumElements = TLI.getNumElements(VT);
271 MVT::ValueType VT1,VT2;
273 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
276 unsigned PHIReg = ValueMap[PN];
277 assert(PHIReg &&"PHI node does not have an assigned virtual register!");
278 for (unsigned i = 0; i != NumElements; ++i)
279 BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
284 /// CreateRegForValue - Allocate the appropriate number of virtual registers of
285 /// the correctly promoted or expanded types. Assign these registers
286 /// consecutive vreg numbers and return the first assigned number.
287 unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
288 MVT::ValueType VT = TLI.getValueType(V->getType());
290 // The number of multiples of registers that we need, to, e.g., split up
291 // a <2 x int64> -> 4 x i32 registers.
292 unsigned NumVectorRegs = 1;
294 // If this is a packed type, figure out what type it will decompose into
295 // and how many of the elements it will use.
296 if (VT == MVT::Vector) {
297 const PackedType *PTy = cast<PackedType>(V->getType());
298 unsigned NumElts = PTy->getNumElements();
299 MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType());
301 // Divide the input until we get to a supported size. This will always
302 // end with a scalar if the target doesn't support vectors.
303 while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) {
310 VT = getVectorType(EltTy, NumElts);
313 // The common case is that we will only create one register for this
314 // value. If we have that case, create and return the virtual register.
315 unsigned NV = TLI.getNumElements(VT);
317 // If we are promoting this value, pick the next largest supported type.
318 MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT);
319 unsigned Reg = MakeReg(PromotedType);
320 // If this is a vector of supported or promoted types (e.g. 4 x i16),
321 // create all of the registers.
322 for (unsigned i = 1; i != NumVectorRegs; ++i)
323 MakeReg(PromotedType);
327 // If this value is represented with multiple target registers, make sure
328 // to create enough consecutive registers of the right (smaller) type.
329 unsigned NT = VT-1; // Find the type to use.
330 while (TLI.getNumElements((MVT::ValueType)NT) != 1)
333 unsigned R = MakeReg((MVT::ValueType)NT);
334 for (unsigned i = 1; i != NV*NumVectorRegs; ++i)
335 MakeReg((MVT::ValueType)NT);
339 //===----------------------------------------------------------------------===//
340 /// SelectionDAGLowering - This is the common target-independent lowering
341 /// implementation that is parameterized by a TargetLowering object.
342 /// Also, targets can overload any lowering method.
345 class SelectionDAGLowering {
346 MachineBasicBlock *CurMBB;
348 std::map<const Value*, SDOperand> NodeMap;
350 /// PendingLoads - Loads are not emitted to the program immediately. We bunch
351 /// them up and then emit token factor nodes when possible. This allows us to
352 /// get simple disambiguation between loads without worrying about alias
354 std::vector<SDOperand> PendingLoads;
356 /// Case - A pair of values to record the Value for a switch case, and the
357 /// case's target basic block.
358 typedef std::pair<Constant*, MachineBasicBlock*> Case;
359 typedef std::vector<Case>::iterator CaseItr;
360 typedef std::pair<CaseItr, CaseItr> CaseRange;
362 /// CaseRec - A struct with ctor used in lowering switches to a binary tree
363 /// of conditional branches.
365 CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) :
366 CaseBB(bb), LT(lt), GE(ge), Range(r) {}
368 /// CaseBB - The MBB in which to emit the compare and branch
369 MachineBasicBlock *CaseBB;
370 /// LT, GE - If nonzero, we know the current case value must be less-than or
371 /// greater-than-or-equal-to these Constants.
374 /// Range - A pair of iterators representing the range of case values to be
375 /// processed at this point in the binary search tree.
379 /// The comparison function for sorting Case values.
381 bool operator () (const Case& C1, const Case& C2) {
382 if (const ConstantUInt* U1 = dyn_cast<const ConstantUInt>(C1.first))
383 return U1->getValue() < cast<const ConstantUInt>(C2.first)->getValue();
385 const ConstantSInt* S1 = dyn_cast<const ConstantSInt>(C1.first);
386 return S1->getValue() < cast<const ConstantSInt>(C2.first)->getValue();
391 // TLI - This is information that describes the available target features we
392 // need for lowering. This indicates when operations are unavailable,
393 // implemented with a libcall, etc.
396 const TargetData &TD;
398 /// SwitchCases - Vector of CaseBlock structures used to communicate
399 /// SwitchInst code generation information.
400 std::vector<SelectionDAGISel::CaseBlock> SwitchCases;
402 /// FuncInfo - Information about the function as a whole.
404 FunctionLoweringInfo &FuncInfo;
406 SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
407 FunctionLoweringInfo &funcinfo)
408 : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
412 /// getRoot - Return the current virtual root of the Selection DAG.
414 SDOperand getRoot() {
415 if (PendingLoads.empty())
416 return DAG.getRoot();
418 if (PendingLoads.size() == 1) {
419 SDOperand Root = PendingLoads[0];
421 PendingLoads.clear();
425 // Otherwise, we have to make a token factor node.
426 SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, PendingLoads);
427 PendingLoads.clear();
432 void visit(Instruction &I) { visit(I.getOpcode(), I); }
434 void visit(unsigned Opcode, User &I) {
436 default: assert(0 && "Unknown instruction type encountered!");
438 // Build the switch statement using the Instruction.def file.
439 #define HANDLE_INST(NUM, OPCODE, CLASS) \
440 case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
441 #include "llvm/Instruction.def"
445 void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }
447 SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr,
448 SDOperand SrcValue, SDOperand Root,
451 SDOperand getIntPtrConstant(uint64_t Val) {
452 return DAG.getConstant(Val, TLI.getPointerTy());
455 SDOperand getValue(const Value *V);
457 const SDOperand &setValue(const Value *V, SDOperand NewN) {
458 SDOperand &N = NodeMap[V];
459 assert(N.Val == 0 && "Already set a value for this node!");
463 RegsForValue GetRegistersForValue(const std::string &ConstrCode,
465 bool OutReg, bool InReg,
466 std::set<unsigned> &OutputRegs,
467 std::set<unsigned> &InputRegs);
469 // Terminator instructions.
470 void visitRet(ReturnInst &I);
471 void visitBr(BranchInst &I);
472 void visitSwitch(SwitchInst &I);
473 void visitUnreachable(UnreachableInst &I) { /* noop */ }
475 // Helper for visitSwitch
476 void visitSwitchCase(SelectionDAGISel::CaseBlock &CB);
478 // These all get lowered before this pass.
479 void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
480 void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }
482 void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp);
483 void visitShift(User &I, unsigned Opcode);
484 void visitAdd(User &I) {
485 visitBinary(I, ISD::ADD, ISD::FADD, ISD::VADD);
487 void visitSub(User &I);
488 void visitMul(User &I) {
489 visitBinary(I, ISD::MUL, ISD::FMUL, ISD::VMUL);
491 void visitDiv(User &I) {
492 const Type *Ty = I.getType();
494 Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV,
495 Ty->isSigned() ? ISD::VSDIV : ISD::VUDIV);
497 void visitRem(User &I) {
498 const Type *Ty = I.getType();
499 visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0);
501 void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, ISD::VAND); }
502 void visitOr (User &I) { visitBinary(I, ISD::OR, 0, ISD::VOR); }
503 void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, ISD::VXOR); }
504 void visitShl(User &I) { visitShift(I, ISD::SHL); }
505 void visitShr(User &I) {
506 visitShift(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);
509 void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc);
510 void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ); }
511 void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE); }
512 void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE); }
513 void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE); }
514 void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); }
515 void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); }
517 void visitExtractElement(User &I);
518 void visitInsertElement(User &I);
519 void visitShuffleVector(User &I);
521 void visitGetElementPtr(User &I);
522 void visitCast(User &I);
523 void visitSelect(User &I);
525 void visitMalloc(MallocInst &I);
526 void visitFree(FreeInst &I);
527 void visitAlloca(AllocaInst &I);
528 void visitLoad(LoadInst &I);
529 void visitStore(StoreInst &I);
530 void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
531 void visitCall(CallInst &I);
532 void visitInlineAsm(CallInst &I);
533 const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
534 void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);
536 void visitVAStart(CallInst &I);
537 void visitVAArg(VAArgInst &I);
538 void visitVAEnd(CallInst &I);
539 void visitVACopy(CallInst &I);
540 void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);
542 void visitMemIntrinsic(CallInst &I, unsigned Op);
544 void visitUserOp1(Instruction &I) {
545 assert(0 && "UserOp1 should not exist at instruction selection time!");
548 void visitUserOp2(Instruction &I) {
549 assert(0 && "UserOp2 should not exist at instruction selection time!");
553 } // end namespace llvm
555 SDOperand SelectionDAGLowering::getValue(const Value *V) {
556 SDOperand &N = NodeMap[V];
559 const Type *VTy = V->getType();
560 MVT::ValueType VT = TLI.getValueType(VTy);
561 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
562 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
563 visit(CE->getOpcode(), *CE);
564 assert(N.Val && "visit didn't populate the ValueMap!");
566 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
567 return N = DAG.getGlobalAddress(GV, VT);
568 } else if (isa<ConstantPointerNull>(C)) {
569 return N = DAG.getConstant(0, TLI.getPointerTy());
570 } else if (isa<UndefValue>(C)) {
571 if (!isa<PackedType>(VTy))
572 return N = DAG.getNode(ISD::UNDEF, VT);
574 // Create a VBUILD_VECTOR of undef nodes.
575 const PackedType *PTy = cast<PackedType>(VTy);
576 unsigned NumElements = PTy->getNumElements();
577 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
579 std::vector<SDOperand> Ops;
580 Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT));
582 // Create a VConstant node with generic Vector type.
583 Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
584 Ops.push_back(DAG.getValueType(PVT));
585 return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
586 } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
587 return N = DAG.getConstantFP(CFP->getValue(), VT);
588 } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
589 unsigned NumElements = PTy->getNumElements();
590 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
592 // Now that we know the number and type of the elements, push a
593 // Constant or ConstantFP node onto the ops list for each element of
594 // the packed constant.
595 std::vector<SDOperand> Ops;
596 if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
597 for (unsigned i = 0; i != NumElements; ++i)
598 Ops.push_back(getValue(CP->getOperand(i)));
600 assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
602 if (MVT::isFloatingPoint(PVT))
603 Op = DAG.getConstantFP(0, PVT);
605 Op = DAG.getConstant(0, PVT);
606 Ops.assign(NumElements, Op);
609 // Create a VBUILD_VECTOR node with generic Vector type.
610 Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
611 Ops.push_back(DAG.getValueType(PVT));
612 return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
614 // Canonicalize all constant ints to be unsigned.
615 return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT);
619 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
620 std::map<const AllocaInst*, int>::iterator SI =
621 FuncInfo.StaticAllocaMap.find(AI);
622 if (SI != FuncInfo.StaticAllocaMap.end())
623 return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
626 std::map<const Value*, unsigned>::const_iterator VMI =
627 FuncInfo.ValueMap.find(V);
628 assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");
630 unsigned InReg = VMI->second;
632 // If this type is not legal, make it so now.
633 if (VT != MVT::Vector) {
634 MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);
636 N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
638 // Source must be expanded. This input value is actually coming from the
639 // register pair VMI->second and VMI->second+1.
640 N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
641 DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
642 } else if (DestVT > VT) { // Promotion case
643 if (MVT::isFloatingPoint(VT))
644 N = DAG.getNode(ISD::FP_ROUND, VT, N);
646 N = DAG.getNode(ISD::TRUNCATE, VT, N);
649 // Otherwise, if this is a vector, make it available as a generic vector
651 MVT::ValueType PTyElementVT, PTyLegalElementVT;
652 const PackedType *PTy = cast<PackedType>(VTy);
653 unsigned NE = TLI.getPackedTypeBreakdown(PTy, PTyElementVT,
656 // Build a VBUILD_VECTOR with the input registers.
657 std::vector<SDOperand> Ops;
658 if (PTyElementVT == PTyLegalElementVT) {
659 // If the value types are legal, just VBUILD the CopyFromReg nodes.
660 for (unsigned i = 0; i != NE; ++i)
661 Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
663 } else if (PTyElementVT < PTyLegalElementVT) {
664 // If the register was promoted, use TRUNCATE of FP_ROUND as appropriate.
665 for (unsigned i = 0; i != NE; ++i) {
666 SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
668 if (MVT::isFloatingPoint(PTyElementVT))
669 Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op);
671 Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op);
675 // If the register was expanded, use BUILD_PAIR.
676 assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!");
677 for (unsigned i = 0; i != NE/2; ++i) {
678 SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
680 SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
682 Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1));
686 Ops.push_back(DAG.getConstant(NE, MVT::i32));
687 Ops.push_back(DAG.getValueType(PTyLegalElementVT));
688 N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
690 // Finally, use a VBIT_CONVERT to make this available as the appropriate
692 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
693 DAG.getConstant(PTy->getNumElements(),
695 DAG.getValueType(TLI.getValueType(PTy->getElementType())));
702 void SelectionDAGLowering::visitRet(ReturnInst &I) {
703 if (I.getNumOperands() == 0) {
704 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
707 std::vector<SDOperand> NewValues;
708 NewValues.push_back(getRoot());
709 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
710 SDOperand RetOp = getValue(I.getOperand(i));
712 // If this is an integer return value, we need to promote it ourselves to
713 // the full width of a register, since LegalizeOp will use ANY_EXTEND rather
715 if (MVT::isInteger(RetOp.getValueType()) &&
716 RetOp.getValueType() < MVT::i64) {
717 MVT::ValueType TmpVT;
718 if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
719 TmpVT = TLI.getTypeToTransformTo(MVT::i32);
723 if (I.getOperand(i)->getType()->isSigned())
724 RetOp = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, RetOp);
726 RetOp = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, RetOp);
728 NewValues.push_back(RetOp);
730 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, NewValues));
733 void SelectionDAGLowering::visitBr(BranchInst &I) {
734 // Update machine-CFG edges.
735 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
736 CurMBB->addSuccessor(Succ0MBB);
738 // Figure out which block is immediately after the current one.
739 MachineBasicBlock *NextBlock = 0;
740 MachineFunction::iterator BBI = CurMBB;
741 if (++BBI != CurMBB->getParent()->end())
744 if (I.isUnconditional()) {
745 // If this is not a fall-through branch, emit the branch.
746 if (Succ0MBB != NextBlock)
747 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
748 DAG.getBasicBlock(Succ0MBB)));
750 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
751 CurMBB->addSuccessor(Succ1MBB);
753 SDOperand Cond = getValue(I.getCondition());
754 if (Succ1MBB == NextBlock) {
755 // If the condition is false, fall through. This means we should branch
756 // if the condition is true to Succ #0.
757 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
758 Cond, DAG.getBasicBlock(Succ0MBB)));
759 } else if (Succ0MBB == NextBlock) {
760 // If the condition is true, fall through. This means we should branch if
761 // the condition is false to Succ #1. Invert the condition first.
762 SDOperand True = DAG.getConstant(1, Cond.getValueType());
763 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
764 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
765 Cond, DAG.getBasicBlock(Succ1MBB)));
767 std::vector<SDOperand> Ops;
768 Ops.push_back(getRoot());
769 // If the false case is the current basic block, then this is a self
770 // loop. We do not want to emit "Loop: ... brcond Out; br Loop", as it
771 // adds an extra instruction in the loop. Instead, invert the
772 // condition and emit "Loop: ... br!cond Loop; br Out.
773 if (CurMBB == Succ1MBB) {
774 std::swap(Succ0MBB, Succ1MBB);
775 SDOperand True = DAG.getConstant(1, Cond.getValueType());
776 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
778 SDOperand True = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
779 DAG.getBasicBlock(Succ0MBB));
780 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, True,
781 DAG.getBasicBlock(Succ1MBB)));
786 /// visitSwitchCase - Emits the necessary code to represent a single node in
787 /// the binary search tree resulting from lowering a switch instruction.
788 void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) {
789 SDOperand SwitchOp = getValue(CB.SwitchV);
790 SDOperand CaseOp = getValue(CB.CaseC);
791 SDOperand Cond = DAG.getSetCC(MVT::i1, SwitchOp, CaseOp, CB.CC);
793 // Set NextBlock to be the MBB immediately after the current one, if any.
794 // This is used to avoid emitting unnecessary branches to the next block.
795 MachineBasicBlock *NextBlock = 0;
796 MachineFunction::iterator BBI = CurMBB;
797 if (++BBI != CurMBB->getParent()->end())
800 // If the lhs block is the next block, invert the condition so that we can
801 // fall through to the lhs instead of the rhs block.
802 if (CB.LHSBB == NextBlock) {
803 std::swap(CB.LHSBB, CB.RHSBB);
804 SDOperand True = DAG.getConstant(1, Cond.getValueType());
805 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
807 SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
808 DAG.getBasicBlock(CB.LHSBB));
809 if (CB.RHSBB == NextBlock)
812 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
813 DAG.getBasicBlock(CB.RHSBB)));
814 // Update successor info
815 CurMBB->addSuccessor(CB.LHSBB);
816 CurMBB->addSuccessor(CB.RHSBB);
819 void SelectionDAGLowering::visitSwitch(SwitchInst &I) {
820 // Figure out which block is immediately after the current one.
821 MachineBasicBlock *NextBlock = 0;
822 MachineFunction::iterator BBI = CurMBB;
823 if (++BBI != CurMBB->getParent()->end())
826 // If there is only the default destination, branch to it if it is not the
827 // next basic block. Otherwise, just fall through.
828 if (I.getNumOperands() == 2) {
829 // Update machine-CFG edges.
830 MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[I.getDefaultDest()];
831 // If this is not a fall-through branch, emit the branch.
832 if (DefaultMBB != NextBlock)
833 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
834 DAG.getBasicBlock(DefaultMBB)));
838 // If there are any non-default case statements, create a vector of Cases
839 // representing each one, and sort the vector so that we can efficiently
840 // create a binary search tree from them.
841 std::vector<Case> Cases;
842 for (unsigned i = 1; i < I.getNumSuccessors(); ++i) {
843 MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)];
844 Cases.push_back(Case(I.getSuccessorValue(i), SMBB));
846 std::sort(Cases.begin(), Cases.end(), CaseCmp());
848 // Get the Value to be switched on and default basic blocks, which will be
849 // inserted into CaseBlock records, representing basic blocks in the binary
851 Value *SV = I.getOperand(0);
852 MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()];
854 // Get the current MachineFunction and LLVM basic block, for use in creating
855 // and inserting new MBBs during the creation of the binary search tree.
856 MachineFunction *CurMF = CurMBB->getParent();
857 const BasicBlock *LLVMBB = CurMBB->getBasicBlock();
859 // Push the initial CaseRec onto the worklist
860 std::vector<CaseRec> CaseVec;
861 CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
863 while (!CaseVec.empty()) {
864 // Grab a record representing a case range to process off the worklist
865 CaseRec CR = CaseVec.back();
868 // Size is the number of Cases represented by this range. If Size is 1,
869 // then we are processing a leaf of the binary search tree. Otherwise,
870 // we need to pick a pivot, and push left and right ranges onto the
872 unsigned Size = CR.Range.second - CR.Range.first;
875 // Create a CaseBlock record representing a conditional branch to
876 // the Case's target mbb if the value being switched on SV is equal
877 // to C. Otherwise, branch to default.
878 Constant *C = CR.Range.first->first;
879 MachineBasicBlock *Target = CR.Range.first->second;
880 SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default,
882 // If the MBB representing the leaf node is the current MBB, then just
883 // call visitSwitchCase to emit the code into the current block.
884 // Otherwise, push the CaseBlock onto the vector to be later processed
885 // by SDISel, and insert the node's MBB before the next MBB.
886 if (CR.CaseBB == CurMBB)
889 SwitchCases.push_back(CB);
890 CurMF->getBasicBlockList().insert(BBI, CR.CaseBB);
893 // split case range at pivot
894 CaseItr Pivot = CR.Range.first + (Size / 2);
895 CaseRange LHSR(CR.Range.first, Pivot);
896 CaseRange RHSR(Pivot, CR.Range.second);
897 Constant *C = Pivot->first;
898 MachineBasicBlock *RHSBB = 0, *LHSBB = 0;
899 // We know that we branch to the LHS if the Value being switched on is
900 // less than the Pivot value, C. We use this to optimize our binary
901 // tree a bit, by recognizing that if SV is greater than or equal to the
902 // LHS's Case Value, and that Case Value is exactly one less than the
903 // Pivot's Value, then we can branch directly to the LHS's Target,
904 // rather than creating a leaf node for it.
905 if ((LHSR.second - LHSR.first) == 1 &&
906 LHSR.first->first == CR.GE &&
907 cast<ConstantIntegral>(C)->getRawValue() ==
908 (cast<ConstantIntegral>(CR.GE)->getRawValue() + 1ULL)) {
909 LHSBB = LHSR.first->second;
911 LHSBB = new MachineBasicBlock(LLVMBB);
912 CaseVec.push_back(CaseRec(LHSBB,C,CR.GE,LHSR));
914 // Similar to the optimization above, if the Value being switched on is
915 // known to be less than the Constant CR.LT, and the current Case Value
916 // is CR.LT - 1, then we can branch directly to the target block for
917 // the current Case Value, rather than emitting a RHS leaf node for it.
918 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
919 cast<ConstantIntegral>(RHSR.first->first)->getRawValue() ==
920 (cast<ConstantIntegral>(CR.LT)->getRawValue() - 1ULL)) {
921 RHSBB = RHSR.first->second;
923 RHSBB = new MachineBasicBlock(LLVMBB);
924 CaseVec.push_back(CaseRec(RHSBB,CR.LT,C,RHSR));
926 // Create a CaseBlock record representing a conditional branch to
927 // the LHS node if the value being switched on SV is less than C.
928 // Otherwise, branch to LHS.
929 ISD::CondCode CC = C->getType()->isSigned() ? ISD::SETLT : ISD::SETULT;
930 SelectionDAGISel::CaseBlock CB(CC, SV, C, LHSBB, RHSBB, CR.CaseBB);
931 if (CR.CaseBB == CurMBB)
934 SwitchCases.push_back(CB);
935 CurMF->getBasicBlockList().insert(BBI, CR.CaseBB);
941 void SelectionDAGLowering::visitSub(User &I) {
943 if (I.getType()->isFloatingPoint()) {
944 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
945 if (CFP->isExactlyValue(-0.0)) {
946 SDOperand Op2 = getValue(I.getOperand(1));
947 setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
951 visitBinary(I, ISD::SUB, ISD::FSUB, ISD::VSUB);
954 void SelectionDAGLowering::visitBinary(User &I, unsigned IntOp, unsigned FPOp,
956 const Type *Ty = I.getType();
957 SDOperand Op1 = getValue(I.getOperand(0));
958 SDOperand Op2 = getValue(I.getOperand(1));
960 if (Ty->isIntegral()) {
961 setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
962 } else if (Ty->isFloatingPoint()) {
963 setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
965 const PackedType *PTy = cast<PackedType>(Ty);
966 SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
967 SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
968 setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
972 void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
973 SDOperand Op1 = getValue(I.getOperand(0));
974 SDOperand Op2 = getValue(I.getOperand(1));
976 Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);
978 setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
981 void SelectionDAGLowering::visitSetCC(User &I,ISD::CondCode SignedOpcode,
982 ISD::CondCode UnsignedOpcode) {
983 SDOperand Op1 = getValue(I.getOperand(0));
984 SDOperand Op2 = getValue(I.getOperand(1));
985 ISD::CondCode Opcode = SignedOpcode;
986 if (I.getOperand(0)->getType()->isUnsigned())
987 Opcode = UnsignedOpcode;
988 setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
991 void SelectionDAGLowering::visitSelect(User &I) {
992 SDOperand Cond = getValue(I.getOperand(0));
993 SDOperand TrueVal = getValue(I.getOperand(1));
994 SDOperand FalseVal = getValue(I.getOperand(2));
995 setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
999 void SelectionDAGLowering::visitCast(User &I) {
1000 SDOperand N = getValue(I.getOperand(0));
1001 MVT::ValueType SrcVT = N.getValueType();
1002 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1004 if (DestVT == MVT::Vector) {
1005 // This is a cast to a vector from something else. This is always a bit
1006 // convert. Get information about the input vector.
1007 const PackedType *DestTy = cast<PackedType>(I.getType());
1008 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
1009 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N,
1010 DAG.getConstant(DestTy->getNumElements(),MVT::i32),
1011 DAG.getValueType(EltVT)));
1012 } else if (SrcVT == DestVT) {
1013 setValue(&I, N); // noop cast.
1014 } else if (DestVT == MVT::i1) {
1015 // Cast to bool is a comparison against zero, not truncation to zero.
1016 SDOperand Zero = isInteger(SrcVT) ? DAG.getConstant(0, N.getValueType()) :
1017 DAG.getConstantFP(0.0, N.getValueType());
1018 setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
1019 } else if (isInteger(SrcVT)) {
1020 if (isInteger(DestVT)) { // Int -> Int cast
1021 if (DestVT < SrcVT) // Truncating cast?
1022 setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
1023 else if (I.getOperand(0)->getType()->isSigned())
1024 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
1026 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
1027 } else if (isFloatingPoint(DestVT)) { // Int -> FP cast
1028 if (I.getOperand(0)->getType()->isSigned())
1029 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
1031 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
1033 assert(0 && "Unknown cast!");
1035 } else if (isFloatingPoint(SrcVT)) {
1036 if (isFloatingPoint(DestVT)) { // FP -> FP cast
1037 if (DestVT < SrcVT) // Rounding cast?
1038 setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N));
1040 setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
1041 } else if (isInteger(DestVT)) { // FP -> Int cast.
1042 if (I.getType()->isSigned())
1043 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
1045 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
1047 assert(0 && "Unknown cast!");
1050 assert(SrcVT == MVT::Vector && "Unknown cast!");
1051 assert(DestVT != MVT::Vector && "Casts to vector already handled!");
1052 // This is a cast from a vector to something else. This is always a bit
1053 // convert. Get information about the input vector.
1054 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N));
1058 void SelectionDAGLowering::visitInsertElement(User &I) {
1059 SDOperand InVec = getValue(I.getOperand(0));
1060 SDOperand InVal = getValue(I.getOperand(1));
1061 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
1062 getValue(I.getOperand(2)));
1064 SDOperand Num = *(InVec.Val->op_end()-2);
1065 SDOperand Typ = *(InVec.Val->op_end()-1);
1066 setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector,
1067 InVec, InVal, InIdx, Num, Typ));
1070 void SelectionDAGLowering::visitExtractElement(User &I) {
1071 SDOperand InVec = getValue(I.getOperand(0));
1072 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
1073 getValue(I.getOperand(1)));
1074 SDOperand Typ = *(InVec.Val->op_end()-1);
1075 setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT,
1076 TLI.getValueType(I.getType()), InVec, InIdx));
1079 void SelectionDAGLowering::visitShuffleVector(User &I) {
1080 SDOperand V1 = getValue(I.getOperand(0));
1081 SDOperand V2 = getValue(I.getOperand(1));
1082 SDOperand Mask = getValue(I.getOperand(2));
1084 SDOperand Num = *(V1.Val->op_end()-2);
1085 SDOperand Typ = *(V2.Val->op_end()-1);
1086 setValue(&I, DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector,
1087 V1, V2, Mask, Num, Typ));
1091 void SelectionDAGLowering::visitGetElementPtr(User &I) {
1092 SDOperand N = getValue(I.getOperand(0));
1093 const Type *Ty = I.getOperand(0)->getType();
1094 const Type *UIntPtrTy = TD.getIntPtrType();
1096 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
1099 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
1100 unsigned Field = cast<ConstantUInt>(Idx)->getValue();
1103 uint64_t Offset = TD.getStructLayout(StTy)->MemberOffsets[Field];
1104 N = DAG.getNode(ISD::ADD, N.getValueType(), N,
1105 getIntPtrConstant(Offset));
1107 Ty = StTy->getElementType(Field);
1109 Ty = cast<SequentialType>(Ty)->getElementType();
1111 // If this is a constant subscript, handle it quickly.
1112 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
1113 if (CI->getRawValue() == 0) continue;
1116 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
1117 Offs = (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
1119 Offs = TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
1120 N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
1124 // N = N + Idx * ElementSize;
1125 uint64_t ElementSize = TD.getTypeSize(Ty);
1126 SDOperand IdxN = getValue(Idx);
1128 // If the index is smaller or larger than intptr_t, truncate or extend
1130 if (IdxN.getValueType() < N.getValueType()) {
1131 if (Idx->getType()->isSigned())
1132 IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
1134 IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN);
1135 } else if (IdxN.getValueType() > N.getValueType())
1136 IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);
1138 // If this is a multiply by a power of two, turn it into a shl
1139 // immediately. This is a very common case.
1140 if (isPowerOf2_64(ElementSize)) {
1141 unsigned Amt = Log2_64(ElementSize);
1142 IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
1143 DAG.getConstant(Amt, TLI.getShiftAmountTy()));
1144 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
1148 SDOperand Scale = getIntPtrConstant(ElementSize);
1149 IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
1150 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
1156 void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
1157 // If this is a fixed sized alloca in the entry block of the function,
1158 // allocate it statically on the stack.
1159 if (FuncInfo.StaticAllocaMap.count(&I))
1160 return; // getValue will auto-populate this.
1162 const Type *Ty = I.getAllocatedType();
1163 uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
1164 unsigned Align = std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
1167 SDOperand AllocSize = getValue(I.getArraySize());
1168 MVT::ValueType IntPtr = TLI.getPointerTy();
1169 if (IntPtr < AllocSize.getValueType())
1170 AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
1171 else if (IntPtr > AllocSize.getValueType())
1172 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);
1174 AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
1175 getIntPtrConstant(TySize));
1177 // Handle alignment. If the requested alignment is less than or equal to the
1178 // stack alignment, ignore it and round the size of the allocation up to the
1179 // stack alignment size. If the size is greater than the stack alignment, we
1180 // note this in the DYNAMIC_STACKALLOC node.
1181 unsigned StackAlign =
1182 TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
1183 if (Align <= StackAlign) {
1185 // Add SA-1 to the size.
1186 AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
1187 getIntPtrConstant(StackAlign-1));
1188 // Mask out the low bits for alignment purposes.
1189 AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
1190 getIntPtrConstant(~(uint64_t)(StackAlign-1)));
1193 std::vector<MVT::ValueType> VTs;
1194 VTs.push_back(AllocSize.getValueType());
1195 VTs.push_back(MVT::Other);
1196 std::vector<SDOperand> Ops;
1197 Ops.push_back(getRoot());
1198 Ops.push_back(AllocSize);
1199 Ops.push_back(getIntPtrConstant(Align));
1200 SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, Ops);
1201 DAG.setRoot(setValue(&I, DSA).getValue(1));
1203 // Inform the Frame Information that we have just allocated a variable-sized
1205 CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
1208 void SelectionDAGLowering::visitLoad(LoadInst &I) {
1209 SDOperand Ptr = getValue(I.getOperand(0));
1215 // Do not serialize non-volatile loads against each other.
1216 Root = DAG.getRoot();
1219 setValue(&I, getLoadFrom(I.getType(), Ptr, DAG.getSrcValue(I.getOperand(0)),
1220 Root, I.isVolatile()));
1223 SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr,
1224 SDOperand SrcValue, SDOperand Root,
1227 if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
1228 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
1229 L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr, SrcValue);
1231 L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SrcValue);
1235 DAG.setRoot(L.getValue(1));
1237 PendingLoads.push_back(L.getValue(1));
1243 void SelectionDAGLowering::visitStore(StoreInst &I) {
1244 Value *SrcV = I.getOperand(0);
1245 SDOperand Src = getValue(SrcV);
1246 SDOperand Ptr = getValue(I.getOperand(1));
1247 DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
1248 DAG.getSrcValue(I.getOperand(1))));
1251 /// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot
1252 /// access memory and has no other side effects at all.
1253 static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) {
1254 #define GET_NO_MEMORY_INTRINSICS
1255 #include "llvm/Intrinsics.gen"
1256 #undef GET_NO_MEMORY_INTRINSICS
1260 // IntrinsicOnlyReadsMemory - Return true if the specified intrinsic doesn't
1261 // have any side-effects or if it only reads memory.
1262 static bool IntrinsicOnlyReadsMemory(unsigned IntrinsicID) {
1263 #define GET_SIDE_EFFECT_INFO
1264 #include "llvm/Intrinsics.gen"
1265 #undef GET_SIDE_EFFECT_INFO
1269 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
1271 void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
1272 unsigned Intrinsic) {
1273 bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic);
1274 bool OnlyLoad = HasChain && IntrinsicOnlyReadsMemory(Intrinsic);
1276 // Build the operand list.
1277 std::vector<SDOperand> Ops;
1278 if (HasChain) { // If this intrinsic has side-effects, chainify it.
1280 // We don't need to serialize loads against other loads.
1281 Ops.push_back(DAG.getRoot());
1283 Ops.push_back(getRoot());
1287 // Add the intrinsic ID as an integer operand.
1288 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
1290 // Add all operands of the call to the operand list.
1291 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
1292 SDOperand Op = getValue(I.getOperand(i));
1294 // If this is a vector type, force it to the right packed type.
1295 if (Op.getValueType() == MVT::Vector) {
1296 const PackedType *OpTy = cast<PackedType>(I.getOperand(i)->getType());
1297 MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType());
1299 MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements());
1300 assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?");
1301 Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op);
1304 assert(TLI.isTypeLegal(Op.getValueType()) &&
1305 "Intrinsic uses a non-legal type?");
1309 std::vector<MVT::ValueType> VTs;
1310 if (I.getType() != Type::VoidTy) {
1311 MVT::ValueType VT = TLI.getValueType(I.getType());
1312 if (VT == MVT::Vector) {
1313 const PackedType *DestTy = cast<PackedType>(I.getType());
1314 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
1316 VT = MVT::getVectorType(EltVT, DestTy->getNumElements());
1317 assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
1320 assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
1324 VTs.push_back(MVT::Other);
1329 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTs, Ops);
1330 else if (I.getType() != Type::VoidTy)
1331 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTs, Ops);
1333 Result = DAG.getNode(ISD::INTRINSIC_VOID, VTs, Ops);
1336 SDOperand Chain = Result.getValue(Result.Val->getNumValues()-1);
1338 PendingLoads.push_back(Chain);
1342 if (I.getType() != Type::VoidTy) {
1343 if (const PackedType *PTy = dyn_cast<PackedType>(I.getType())) {
1344 MVT::ValueType EVT = TLI.getValueType(PTy->getElementType());
1345 Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result,
1346 DAG.getConstant(PTy->getNumElements(), MVT::i32),
1347 DAG.getValueType(EVT));
1349 setValue(&I, Result);
1353 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
1354 /// we want to emit this as a call to a named external function, return the name
1355 /// otherwise lower it and return null.
1357 SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
1358 switch (Intrinsic) {
1360 // By default, turn this into a target intrinsic node.
1361 visitTargetIntrinsic(I, Intrinsic);
1363 case Intrinsic::vastart: visitVAStart(I); return 0;
1364 case Intrinsic::vaend: visitVAEnd(I); return 0;
1365 case Intrinsic::vacopy: visitVACopy(I); return 0;
1366 case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0;
1367 case Intrinsic::frameaddress: visitFrameReturnAddress(I, true); return 0;
1368 case Intrinsic::setjmp:
1369 return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
1371 case Intrinsic::longjmp:
1372 return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
1374 case Intrinsic::memcpy_i32:
1375 case Intrinsic::memcpy_i64:
1376 visitMemIntrinsic(I, ISD::MEMCPY);
1378 case Intrinsic::memset_i32:
1379 case Intrinsic::memset_i64:
1380 visitMemIntrinsic(I, ISD::MEMSET);
1382 case Intrinsic::memmove_i32:
1383 case Intrinsic::memmove_i64:
1384 visitMemIntrinsic(I, ISD::MEMMOVE);
1387 case Intrinsic::dbg_stoppoint: {
1388 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1389 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
1390 if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) {
1391 std::vector<SDOperand> Ops;
1393 Ops.push_back(getRoot());
1394 Ops.push_back(getValue(SPI.getLineValue()));
1395 Ops.push_back(getValue(SPI.getColumnValue()));
1397 DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext());
1398 assert(DD && "Not a debug information descriptor");
1399 CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);
1401 Ops.push_back(DAG.getString(CompileUnit->getFileName()));
1402 Ops.push_back(DAG.getString(CompileUnit->getDirectory()));
1404 DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops));
1409 case Intrinsic::dbg_region_start: {
1410 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1411 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
1412 if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) {
1413 std::vector<SDOperand> Ops;
1415 unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext());
1417 Ops.push_back(getRoot());
1418 Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
1420 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
1425 case Intrinsic::dbg_region_end: {
1426 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1427 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
1428 if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) {
1429 std::vector<SDOperand> Ops;
1431 unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext());
1433 Ops.push_back(getRoot());
1434 Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
1436 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
1441 case Intrinsic::dbg_func_start: {
1442 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1443 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
1444 if (DebugInfo && FSI.getSubprogram() &&
1445 DebugInfo->Verify(FSI.getSubprogram())) {
1446 std::vector<SDOperand> Ops;
1448 unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram());
1450 Ops.push_back(getRoot());
1451 Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
1453 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
1458 case Intrinsic::dbg_declare: {
1459 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1460 DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
1461 if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) {
1462 std::vector<SDOperand> Ops;
1464 SDOperand AddressOp = getValue(DI.getAddress());
1465 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp)) {
1466 DebugInfo->RecordVariable(DI.getVariable(), FI->getIndex());
1473 case Intrinsic::isunordered_f32:
1474 case Intrinsic::isunordered_f64:
1475 setValue(&I, DAG.getSetCC(MVT::i1,getValue(I.getOperand(1)),
1476 getValue(I.getOperand(2)), ISD::SETUO));
1479 case Intrinsic::sqrt_f32:
1480 case Intrinsic::sqrt_f64:
1481 setValue(&I, DAG.getNode(ISD::FSQRT,
1482 getValue(I.getOperand(1)).getValueType(),
1483 getValue(I.getOperand(1))));
1485 case Intrinsic::pcmarker: {
1486 SDOperand Tmp = getValue(I.getOperand(1));
1487 DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
1490 case Intrinsic::readcyclecounter: {
1491 std::vector<MVT::ValueType> VTs;
1492 VTs.push_back(MVT::i64);
1493 VTs.push_back(MVT::Other);
1494 std::vector<SDOperand> Ops;
1495 Ops.push_back(getRoot());
1496 SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, VTs, Ops);
1498 DAG.setRoot(Tmp.getValue(1));
1501 case Intrinsic::bswap_i16:
1502 case Intrinsic::bswap_i32:
1503 case Intrinsic::bswap_i64:
1504 setValue(&I, DAG.getNode(ISD::BSWAP,
1505 getValue(I.getOperand(1)).getValueType(),
1506 getValue(I.getOperand(1))));
1508 case Intrinsic::cttz_i8:
1509 case Intrinsic::cttz_i16:
1510 case Intrinsic::cttz_i32:
1511 case Intrinsic::cttz_i64:
1512 setValue(&I, DAG.getNode(ISD::CTTZ,
1513 getValue(I.getOperand(1)).getValueType(),
1514 getValue(I.getOperand(1))));
1516 case Intrinsic::ctlz_i8:
1517 case Intrinsic::ctlz_i16:
1518 case Intrinsic::ctlz_i32:
1519 case Intrinsic::ctlz_i64:
1520 setValue(&I, DAG.getNode(ISD::CTLZ,
1521 getValue(I.getOperand(1)).getValueType(),
1522 getValue(I.getOperand(1))));
1524 case Intrinsic::ctpop_i8:
1525 case Intrinsic::ctpop_i16:
1526 case Intrinsic::ctpop_i32:
1527 case Intrinsic::ctpop_i64:
1528 setValue(&I, DAG.getNode(ISD::CTPOP,
1529 getValue(I.getOperand(1)).getValueType(),
1530 getValue(I.getOperand(1))));
1532 case Intrinsic::stacksave: {
1533 std::vector<MVT::ValueType> VTs;
1534 VTs.push_back(TLI.getPointerTy());
1535 VTs.push_back(MVT::Other);
1536 std::vector<SDOperand> Ops;
1537 Ops.push_back(getRoot());
1538 SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, VTs, Ops);
1540 DAG.setRoot(Tmp.getValue(1));
1543 case Intrinsic::stackrestore: {
1544 SDOperand Tmp = getValue(I.getOperand(1));
1545 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp));
1548 case Intrinsic::prefetch:
1549 // FIXME: Currently discarding prefetches.
1555 void SelectionDAGLowering::visitCall(CallInst &I) {
1556 const char *RenameFn = 0;
1557 if (Function *F = I.getCalledFunction()) {
1558 if (F->isExternal())
1559 if (unsigned IID = F->getIntrinsicID()) {
1560 RenameFn = visitIntrinsicCall(I, IID);
1563 } else { // Not an LLVM intrinsic.
1564 const std::string &Name = F->getName();
1565 if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) {
1566 if (I.getNumOperands() == 3 && // Basic sanity checks.
1567 I.getOperand(1)->getType()->isFloatingPoint() &&
1568 I.getType() == I.getOperand(1)->getType() &&
1569 I.getType() == I.getOperand(2)->getType()) {
1570 SDOperand LHS = getValue(I.getOperand(1));
1571 SDOperand RHS = getValue(I.getOperand(2));
1572 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(),
1576 } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
1577 if (I.getNumOperands() == 2 && // Basic sanity checks.
1578 I.getOperand(1)->getType()->isFloatingPoint() &&
1579 I.getType() == I.getOperand(1)->getType()) {
1580 SDOperand Tmp = getValue(I.getOperand(1));
1581 setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
1584 } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
1585 if (I.getNumOperands() == 2 && // Basic sanity checks.
1586 I.getOperand(1)->getType()->isFloatingPoint() &&
1587 I.getType() == I.getOperand(1)->getType()) {
1588 SDOperand Tmp = getValue(I.getOperand(1));
1589 setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
1592 } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
1593 if (I.getNumOperands() == 2 && // Basic sanity checks.
1594 I.getOperand(1)->getType()->isFloatingPoint() &&
1595 I.getType() == I.getOperand(1)->getType()) {
1596 SDOperand Tmp = getValue(I.getOperand(1));
1597 setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
1602 } else if (isa<InlineAsm>(I.getOperand(0))) {
1609 Callee = getValue(I.getOperand(0));
1611 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
1612 std::vector<std::pair<SDOperand, const Type*> > Args;
1613 Args.reserve(I.getNumOperands());
1614 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
1615 Value *Arg = I.getOperand(i);
1616 SDOperand ArgNode = getValue(Arg);
1617 Args.push_back(std::make_pair(ArgNode, Arg->getType()));
1620 const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
1621 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
1623 std::pair<SDOperand,SDOperand> Result =
1624 TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(),
1625 I.isTailCall(), Callee, Args, DAG);
1626 if (I.getType() != Type::VoidTy)
1627 setValue(&I, Result.first);
1628 DAG.setRoot(Result.second);
1631 SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
1632 SDOperand &Chain, SDOperand &Flag)const{
1633 SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag);
1634 Chain = Val.getValue(1);
1635 Flag = Val.getValue(2);
1637 // If the result was expanded, copy from the top part.
1638 if (Regs.size() > 1) {
1639 assert(Regs.size() == 2 &&
1640 "Cannot expand to more than 2 elts yet!");
1641 SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag);
1642 Chain = Val.getValue(1);
1643 Flag = Val.getValue(2);
1644 if (DAG.getTargetLoweringInfo().isLittleEndian())
1645 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi);
1647 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Hi, Val);
1650 // Otherwise, if the return value was promoted, truncate it to the
1651 // appropriate type.
1652 if (RegVT == ValueVT)
1655 if (MVT::isInteger(RegVT))
1656 return DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
1658 return DAG.getNode(ISD::FP_ROUND, ValueVT, Val);
1661 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
1662 /// specified value into the registers specified by this object. This uses
1663 /// Chain/Flag as the input and updates them for the output Chain/Flag.
1664 void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
1665 SDOperand &Chain, SDOperand &Flag) const {
1666 if (Regs.size() == 1) {
1667 // If there is a single register and the types differ, this must be
1669 if (RegVT != ValueVT) {
1670 if (MVT::isInteger(RegVT))
1671 Val = DAG.getNode(ISD::ANY_EXTEND, RegVT, Val);
1673 Val = DAG.getNode(ISD::FP_EXTEND, RegVT, Val);
1675 Chain = DAG.getCopyToReg(Chain, Regs[0], Val, Flag);
1676 Flag = Chain.getValue(1);
1678 std::vector<unsigned> R(Regs);
1679 if (!DAG.getTargetLoweringInfo().isLittleEndian())
1680 std::reverse(R.begin(), R.end());
1682 for (unsigned i = 0, e = R.size(); i != e; ++i) {
1683 SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, RegVT, Val,
1684 DAG.getConstant(i, MVT::i32));
1685 Chain = DAG.getCopyToReg(Chain, R[i], Part, Flag);
1686 Flag = Chain.getValue(1);
1691 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
1692 /// operand list. This adds the code marker and includes the number of
1693 /// values added into it.
1694 void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
1695 std::vector<SDOperand> &Ops) const {
1696 Ops.push_back(DAG.getConstant(Code | (Regs.size() << 3), MVT::i32));
1697 for (unsigned i = 0, e = Regs.size(); i != e; ++i)
1698 Ops.push_back(DAG.getRegister(Regs[i], RegVT));
1701 /// isAllocatableRegister - If the specified register is safe to allocate,
1702 /// i.e. it isn't a stack pointer or some other special register, return the
1703 /// register class for the register. Otherwise, return null.
1704 static const TargetRegisterClass *
1705 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
1706 const TargetLowering &TLI, const MRegisterInfo *MRI) {
1707 MVT::ValueType FoundVT = MVT::Other;
1708 const TargetRegisterClass *FoundRC = 0;
1709 for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(),
1710 E = MRI->regclass_end(); RCI != E; ++RCI) {
1711 MVT::ValueType ThisVT = MVT::Other;
1713 const TargetRegisterClass *RC = *RCI;
1714 // If none of the the value types for this register class are valid, we
1715 // can't use it. For example, 64-bit reg classes on 32-bit targets.
1716 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
1718 if (TLI.isTypeLegal(*I)) {
1719 // If we have already found this register in a different register class,
1720 // choose the one with the largest VT specified. For example, on
1721 // PowerPC, we favor f64 register classes over f32.
1722 if (FoundVT == MVT::Other ||
1723 MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) {
1730 if (ThisVT == MVT::Other) continue;
1732 // NOTE: This isn't ideal. In particular, this might allocate the
1733 // frame pointer in functions that need it (due to them not being taken
1734 // out of allocation, because a variable sized allocation hasn't been seen
1735 // yet). This is a slight code pessimization, but should still work.
1736 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
1737 E = RC->allocation_order_end(MF); I != E; ++I)
1739 // We found a matching register class. Keep looking at others in case
1740 // we find one with larger registers that this physreg is also in.
1749 RegsForValue SelectionDAGLowering::
1750 GetRegistersForValue(const std::string &ConstrCode,
1751 MVT::ValueType VT, bool isOutReg, bool isInReg,
1752 std::set<unsigned> &OutputRegs,
1753 std::set<unsigned> &InputRegs) {
1754 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
1755 TLI.getRegForInlineAsmConstraint(ConstrCode, VT);
1756 std::vector<unsigned> Regs;
1758 unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1;
1759 MVT::ValueType RegVT;
1760 MVT::ValueType ValueVT = VT;
1762 if (PhysReg.first) {
1763 if (VT == MVT::Other)
1764 ValueVT = *PhysReg.second->vt_begin();
1767 // This is a explicit reference to a physical register.
1768 Regs.push_back(PhysReg.first);
1770 // If this is an expanded reference, add the rest of the regs to Regs.
1772 RegVT = *PhysReg.second->vt_begin();
1773 TargetRegisterClass::iterator I = PhysReg.second->begin();
1774 TargetRegisterClass::iterator E = PhysReg.second->end();
1775 for (; *I != PhysReg.first; ++I)
1776 assert(I != E && "Didn't find reg!");
1778 // Already added the first reg.
1780 for (; NumRegs; --NumRegs, ++I) {
1781 assert(I != E && "Ran out of registers to allocate!");
1785 return RegsForValue(Regs, RegVT, ValueVT);
1788 // This is a reference to a register class. Allocate NumRegs consecutive,
1789 // available, registers from the class.
1790 std::vector<unsigned> RegClassRegs =
1791 TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT);
1793 const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo();
1794 MachineFunction &MF = *CurMBB->getParent();
1795 unsigned NumAllocated = 0;
1796 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
1797 unsigned Reg = RegClassRegs[i];
1798 // See if this register is available.
1799 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
1800 (isInReg && InputRegs.count(Reg))) { // Already used.
1801 // Make sure we find consecutive registers.
1806 // Check to see if this register is allocatable (i.e. don't give out the
1808 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI);
1810 // Make sure we find consecutive registers.
1815 // Okay, this register is good, we can use it.
1818 // If we allocated enough consecutive
1819 if (NumAllocated == NumRegs) {
1820 unsigned RegStart = (i-NumAllocated)+1;
1821 unsigned RegEnd = i+1;
1822 // Mark all of the allocated registers used.
1823 for (unsigned i = RegStart; i != RegEnd; ++i) {
1824 unsigned Reg = RegClassRegs[i];
1825 Regs.push_back(Reg);
1826 if (isOutReg) OutputRegs.insert(Reg); // Mark reg used.
1827 if (isInReg) InputRegs.insert(Reg); // Mark reg used.
1830 return RegsForValue(Regs, *RC->vt_begin(), VT);
1834 // Otherwise, we couldn't allocate enough registers for this.
1835 return RegsForValue();
1839 /// visitInlineAsm - Handle a call to an InlineAsm object.
1841 void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
1842 InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));
1844 SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
1847 // Note, we treat inline asms both with and without side-effects as the same.
1848 // If an inline asm doesn't have side effects and doesn't access memory, we
1849 // could not choose to not chain it.
1850 bool hasSideEffects = IA->hasSideEffects();
1852 std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
1853 std::vector<MVT::ValueType> ConstraintVTs;
1855 /// AsmNodeOperands - A list of pairs. The first element is a register, the
1856 /// second is a bitfield where bit #0 is set if it is a use and bit #1 is set
1857 /// if it is a def of that register.
1858 std::vector<SDOperand> AsmNodeOperands;
1859 AsmNodeOperands.push_back(SDOperand()); // reserve space for input chain
1860 AsmNodeOperands.push_back(AsmStr);
1862 SDOperand Chain = getRoot();
1865 // We fully assign registers here at isel time. This is not optimal, but
1866 // should work. For register classes that correspond to LLVM classes, we
1867 // could let the LLVM RA do its thing, but we currently don't. Do a prepass
1868 // over the constraints, collecting fixed registers that we know we can't use.
1869 std::set<unsigned> OutputRegs, InputRegs;
1871 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
1872 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
1873 std::string &ConstraintCode = Constraints[i].Codes[0];
1875 MVT::ValueType OpVT;
1877 // Compute the value type for each operand and add it to ConstraintVTs.
1878 switch (Constraints[i].Type) {
1879 case InlineAsm::isOutput:
1880 if (!Constraints[i].isIndirectOutput) {
1881 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
1882 OpVT = TLI.getValueType(I.getType());
1884 const Type *OpTy = I.getOperand(OpNum)->getType();
1885 OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType());
1886 OpNum++; // Consumes a call operand.
1889 case InlineAsm::isInput:
1890 OpVT = TLI.getValueType(I.getOperand(OpNum)->getType());
1891 OpNum++; // Consumes a call operand.
1893 case InlineAsm::isClobber:
1898 ConstraintVTs.push_back(OpVT);
1900 if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0)
1901 continue; // Not assigned a fixed reg.
1903 // Build a list of regs that this operand uses. This always has a single
1904 // element for promoted/expanded operands.
1905 RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT,
1907 OutputRegs, InputRegs);
1909 switch (Constraints[i].Type) {
1910 case InlineAsm::isOutput:
1911 // We can't assign any other output to this register.
1912 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
1913 // If this is an early-clobber output, it cannot be assigned to the same
1914 // value as the input reg.
1915 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
1916 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
1918 case InlineAsm::isInput:
1919 // We can't assign any other input to this register.
1920 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
1922 case InlineAsm::isClobber:
1923 // Clobbered regs cannot be used as inputs or outputs.
1924 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
1925 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
1930 // Loop over all of the inputs, copying the operand values into the
1931 // appropriate registers and processing the output regs.
1932 RegsForValue RetValRegs;
1933 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
1936 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
1937 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
1938 std::string &ConstraintCode = Constraints[i].Codes[0];
1940 switch (Constraints[i].Type) {
1941 case InlineAsm::isOutput: {
1942 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
1943 if (ConstraintCode.size() == 1) // not a physreg name.
1944 CTy = TLI.getConstraintType(ConstraintCode[0]);
1946 if (CTy == TargetLowering::C_Memory) {
1948 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
1950 // Check that the operand (the address to store to) isn't a float.
1951 if (!MVT::isInteger(InOperandVal.getValueType()))
1952 assert(0 && "MATCH FAIL!");
1954 if (!Constraints[i].isIndirectOutput)
1955 assert(0 && "MATCH FAIL!");
1957 OpNum++; // Consumes a call operand.
1959 // Extend/truncate to the right pointer type if needed.
1960 MVT::ValueType PtrType = TLI.getPointerTy();
1961 if (InOperandVal.getValueType() < PtrType)
1962 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
1963 else if (InOperandVal.getValueType() > PtrType)
1964 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
1966 // Add information to the INLINEASM node to know about this output.
1967 unsigned ResOpType = 4/*MEM*/ | (1 << 3);
1968 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
1969 AsmNodeOperands.push_back(InOperandVal);
1973 // Otherwise, this is a register output.
1974 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
1976 // If this is an early-clobber output, or if there is an input
1977 // constraint that matches this, we need to reserve the input register
1978 // so no other inputs allocate to it.
1979 bool UsesInputRegister = false;
1980 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
1981 UsesInputRegister = true;
1983 // Copy the output from the appropriate register. Find a register that
1986 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
1987 true, UsesInputRegister,
1988 OutputRegs, InputRegs);
1989 assert(!Regs.Regs.empty() && "Couldn't allocate output reg!");
1991 if (!Constraints[i].isIndirectOutput) {
1992 assert(RetValRegs.Regs.empty() &&
1993 "Cannot have multiple output constraints yet!");
1994 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
1997 IndirectStoresToEmit.push_back(std::make_pair(Regs,
1998 I.getOperand(OpNum)));
1999 OpNum++; // Consumes a call operand.
2002 // Add information to the INLINEASM node to know that this register is
2004 Regs.AddInlineAsmOperands(2 /*REGDEF*/, DAG, AsmNodeOperands);
2007 case InlineAsm::isInput: {
2008 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2009 OpNum++; // Consumes a call operand.
2011 if (isdigit(ConstraintCode[0])) { // Matching constraint?
2012 // If this is required to match an output register we have already set,
2013 // just use its register.
2014 unsigned OperandNo = atoi(ConstraintCode.c_str());
2016 // Scan until we find the definition we already emitted of this operand.
2017 // When we find it, create a RegsForValue operand.
2018 unsigned CurOp = 2; // The first operand.
2019 for (; OperandNo; --OperandNo) {
2020 // Advance to the next operand.
2022 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2023 assert((NumOps & 7) == 2 /*REGDEF*/ &&
2024 "Skipped past definitions?");
2025 CurOp += (NumOps>>3)+1;
2029 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2030 assert((NumOps & 7) == 2 /*REGDEF*/ &&
2031 "Skipped past definitions?");
2033 // Add NumOps>>3 registers to MatchedRegs.
2034 RegsForValue MatchedRegs;
2035 MatchedRegs.ValueVT = InOperandVal.getValueType();
2036 MatchedRegs.RegVT = AsmNodeOperands[CurOp+1].getValueType();
2037 for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
2038 unsigned Reg=cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
2039 MatchedRegs.Regs.push_back(Reg);
2042 // Use the produced MatchedRegs object to
2043 MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag);
2044 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
2048 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2049 if (ConstraintCode.size() == 1) // not a physreg name.
2050 CTy = TLI.getConstraintType(ConstraintCode[0]);
2052 if (CTy == TargetLowering::C_Other) {
2053 if (!TLI.isOperandValidForConstraint(InOperandVal, ConstraintCode[0]))
2054 assert(0 && "MATCH FAIL!");
2056 // Add information to the INLINEASM node to know about this input.
2057 unsigned ResOpType = 3 /*IMM*/ | (1 << 3);
2058 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2059 AsmNodeOperands.push_back(InOperandVal);
2061 } else if (CTy == TargetLowering::C_Memory) {
2064 // Check that the operand isn't a float.
2065 if (!MVT::isInteger(InOperandVal.getValueType()))
2066 assert(0 && "MATCH FAIL!");
2068 // Extend/truncate to the right pointer type if needed.
2069 MVT::ValueType PtrType = TLI.getPointerTy();
2070 if (InOperandVal.getValueType() < PtrType)
2071 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
2072 else if (InOperandVal.getValueType() > PtrType)
2073 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
2075 // Add information to the INLINEASM node to know about this input.
2076 unsigned ResOpType = 4/*MEM*/ | (1 << 3);
2077 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2078 AsmNodeOperands.push_back(InOperandVal);
2082 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
2084 // Copy the input into the appropriate registers.
2085 RegsForValue InRegs =
2086 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2087 false, true, OutputRegs, InputRegs);
2088 // FIXME: should be match fail.
2089 assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!");
2091 InRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag);
2093 InRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG, AsmNodeOperands);
2096 case InlineAsm::isClobber: {
2097 RegsForValue ClobberedRegs =
2098 GetRegistersForValue(ConstraintCode, MVT::Other, false, false,
2099 OutputRegs, InputRegs);
2100 // Add the clobbered value to the operand list, so that the register
2101 // allocator is aware that the physreg got clobbered.
2102 if (!ClobberedRegs.Regs.empty())
2103 ClobberedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG, AsmNodeOperands);
2109 // Finish up input operands.
2110 AsmNodeOperands[0] = Chain;
2111 if (Flag.Val) AsmNodeOperands.push_back(Flag);
2113 std::vector<MVT::ValueType> VTs;
2114 VTs.push_back(MVT::Other);
2115 VTs.push_back(MVT::Flag);
2116 Chain = DAG.getNode(ISD::INLINEASM, VTs, AsmNodeOperands);
2117 Flag = Chain.getValue(1);
2119 // If this asm returns a register value, copy the result from that register
2120 // and set it as the value of the call.
2121 if (!RetValRegs.Regs.empty())
2122 setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag));
2124 std::vector<std::pair<SDOperand, Value*> > StoresToEmit;
2126 // Process indirect outputs, first output all of the flagged copies out of
2128 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
2129 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
2130 Value *Ptr = IndirectStoresToEmit[i].second;
2131 SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag);
2132 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
2135 // Emit the non-flagged stores from the physregs.
2136 std::vector<SDOperand> OutChains;
2137 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
2138 OutChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
2139 StoresToEmit[i].first,
2140 getValue(StoresToEmit[i].second),
2141 DAG.getSrcValue(StoresToEmit[i].second)));
2142 if (!OutChains.empty())
2143 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains);
2148 void SelectionDAGLowering::visitMalloc(MallocInst &I) {
2149 SDOperand Src = getValue(I.getOperand(0));
2151 MVT::ValueType IntPtr = TLI.getPointerTy();
2153 if (IntPtr < Src.getValueType())
2154 Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
2155 else if (IntPtr > Src.getValueType())
2156 Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
2158 // Scale the source by the type size.
2159 uint64_t ElementSize = TD.getTypeSize(I.getType()->getElementType());
2160 Src = DAG.getNode(ISD::MUL, Src.getValueType(),
2161 Src, getIntPtrConstant(ElementSize));
2163 std::vector<std::pair<SDOperand, const Type*> > Args;
2164 Args.push_back(std::make_pair(Src, TLI.getTargetData().getIntPtrType()));
2166 std::pair<SDOperand,SDOperand> Result =
2167 TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
2168 DAG.getExternalSymbol("malloc", IntPtr),
2170 setValue(&I, Result.first); // Pointers always fit in registers
2171 DAG.setRoot(Result.second);
2174 void SelectionDAGLowering::visitFree(FreeInst &I) {
2175 std::vector<std::pair<SDOperand, const Type*> > Args;
2176 Args.push_back(std::make_pair(getValue(I.getOperand(0)),
2177 TLI.getTargetData().getIntPtrType()));
2178 MVT::ValueType IntPtr = TLI.getPointerTy();
2179 std::pair<SDOperand,SDOperand> Result =
2180 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
2181 DAG.getExternalSymbol("free", IntPtr), Args, DAG);
2182 DAG.setRoot(Result.second);
2185 // InsertAtEndOfBasicBlock - This method should be implemented by targets that
2186 // mark instructions with the 'usesCustomDAGSchedInserter' flag. These
2187 // instructions are special in various ways, which require special support to
2188 // insert. The specified MachineInstr is created but not inserted into any
2189 // basic blocks, and the scheduler passes ownership of it to this method.
2190 MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
2191 MachineBasicBlock *MBB) {
2192 std::cerr << "If a target marks an instruction with "
2193 "'usesCustomDAGSchedInserter', it must implement "
2194 "TargetLowering::InsertAtEndOfBasicBlock!\n";
2199 void SelectionDAGLowering::visitVAStart(CallInst &I) {
2200 DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(),
2201 getValue(I.getOperand(1)),
2202 DAG.getSrcValue(I.getOperand(1))));
2205 void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
2206 SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
2207 getValue(I.getOperand(0)),
2208 DAG.getSrcValue(I.getOperand(0)));
2210 DAG.setRoot(V.getValue(1));
2213 void SelectionDAGLowering::visitVAEnd(CallInst &I) {
2214 DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(),
2215 getValue(I.getOperand(1)),
2216 DAG.getSrcValue(I.getOperand(1))));
2219 void SelectionDAGLowering::visitVACopy(CallInst &I) {
2220 DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(),
2221 getValue(I.getOperand(1)),
2222 getValue(I.getOperand(2)),
2223 DAG.getSrcValue(I.getOperand(1)),
2224 DAG.getSrcValue(I.getOperand(2))));
2227 // It is always conservatively correct for llvm.returnaddress and
2228 // llvm.frameaddress to return 0.
2229 std::pair<SDOperand, SDOperand>
2230 TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
2231 unsigned Depth, SelectionDAG &DAG) {
2232 return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
2235 SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
2236 assert(0 && "LowerOperation not implemented for this target!");
2241 SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op,
2242 SelectionDAG &DAG) {
2243 assert(0 && "CustomPromoteOperation not implemented for this target!");
2248 void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
2249 unsigned Depth = (unsigned)cast<ConstantUInt>(I.getOperand(1))->getValue();
2250 std::pair<SDOperand,SDOperand> Result =
2251 TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
2252 setValue(&I, Result.first);
2253 DAG.setRoot(Result.second);
2256 /// getMemsetValue - Vectorized representation of the memset value
2258 static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
2259 SelectionDAG &DAG) {
2260 MVT::ValueType CurVT = VT;
2261 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
2262 uint64_t Val = C->getValue() & 255;
2264 while (CurVT != MVT::i8) {
2265 Val = (Val << Shift) | Val;
2267 CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
2269 return DAG.getConstant(Val, VT);
2271 Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
2273 while (CurVT != MVT::i8) {
2275 DAG.getNode(ISD::OR, VT,
2276 DAG.getNode(ISD::SHL, VT, Value,
2277 DAG.getConstant(Shift, MVT::i8)), Value);
2279 CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
2286 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
2287 /// used when a memcpy is turned into a memset when the source is a constant
2289 static SDOperand getMemsetStringVal(MVT::ValueType VT,
2290 SelectionDAG &DAG, TargetLowering &TLI,
2291 std::string &Str, unsigned Offset) {
2292 MVT::ValueType CurVT = VT;
2294 unsigned MSB = getSizeInBits(VT) / 8;
2295 if (TLI.isLittleEndian())
2296 Offset = Offset + MSB - 1;
2297 for (unsigned i = 0; i != MSB; ++i) {
2298 Val = (Val << 8) | Str[Offset];
2299 Offset += TLI.isLittleEndian() ? -1 : 1;
2301 return DAG.getConstant(Val, VT);
2304 /// getMemBasePlusOffset - Returns base and offset node for the
2305 static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
2306 SelectionDAG &DAG, TargetLowering &TLI) {
2307 MVT::ValueType VT = Base.getValueType();
2308 return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
2311 /// MeetsMaxMemopRequirement - Determines if the number of memory ops required
2312 /// to replace the memset / memcpy is below the threshold. It also returns the
2313 /// types of the sequence of memory ops to perform memset / memcpy.
2314 static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
2315 unsigned Limit, uint64_t Size,
2316 unsigned Align, TargetLowering &TLI) {
2319 if (TLI.allowsUnalignedMemoryAccesses()) {
2322 switch (Align & 7) {
2338 MVT::ValueType LVT = MVT::i64;
2339 while (!TLI.isTypeLegal(LVT))
2340 LVT = (MVT::ValueType)((unsigned)LVT - 1);
2341 assert(MVT::isInteger(LVT));
2346 unsigned NumMemOps = 0;
2348 unsigned VTSize = getSizeInBits(VT) / 8;
2349 while (VTSize > Size) {
2350 VT = (MVT::ValueType)((unsigned)VT - 1);
2353 assert(MVT::isInteger(VT));
2355 if (++NumMemOps > Limit)
2357 MemOps.push_back(VT);
2364 void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
2365 SDOperand Op1 = getValue(I.getOperand(1));
2366 SDOperand Op2 = getValue(I.getOperand(2));
2367 SDOperand Op3 = getValue(I.getOperand(3));
2368 SDOperand Op4 = getValue(I.getOperand(4));
2369 unsigned Align = (unsigned)cast<ConstantSDNode>(Op4)->getValue();
2370 if (Align == 0) Align = 1;
2372 if (ConstantSDNode *Size = dyn_cast<ConstantSDNode>(Op3)) {
2373 std::vector<MVT::ValueType> MemOps;
2375 // Expand memset / memcpy to a series of load / store ops
2376 // if the size operand falls below a certain threshold.
2377 std::vector<SDOperand> OutChains;
2379 default: break; // Do nothing for now.
2381 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(),
2382 Size->getValue(), Align, TLI)) {
2383 unsigned NumMemOps = MemOps.size();
2384 unsigned Offset = 0;
2385 for (unsigned i = 0; i < NumMemOps; i++) {
2386 MVT::ValueType VT = MemOps[i];
2387 unsigned VTSize = getSizeInBits(VT) / 8;
2388 SDOperand Value = getMemsetValue(Op2, VT, DAG);
2389 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, getRoot(),
2391 getMemBasePlusOffset(Op1, Offset, DAG, TLI),
2392 DAG.getSrcValue(I.getOperand(1), Offset));
2393 OutChains.push_back(Store);
2400 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(),
2401 Size->getValue(), Align, TLI)) {
2402 unsigned NumMemOps = MemOps.size();
2403 unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0;
2404 GlobalAddressSDNode *G = NULL;
2406 bool CopyFromStr = false;
2408 if (Op2.getOpcode() == ISD::GlobalAddress)
2409 G = cast<GlobalAddressSDNode>(Op2);
2410 else if (Op2.getOpcode() == ISD::ADD &&
2411 Op2.getOperand(0).getOpcode() == ISD::GlobalAddress &&
2412 Op2.getOperand(1).getOpcode() == ISD::Constant) {
2413 G = cast<GlobalAddressSDNode>(Op2.getOperand(0));
2414 SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue();
2417 GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
2419 Str = GV->getStringValue(false);
2427 for (unsigned i = 0; i < NumMemOps; i++) {
2428 MVT::ValueType VT = MemOps[i];
2429 unsigned VTSize = getSizeInBits(VT) / 8;
2430 SDOperand Value, Chain, Store;
2433 Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff);
2436 DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
2437 getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
2438 DAG.getSrcValue(I.getOperand(1), DstOff));
2440 Value = DAG.getLoad(VT, getRoot(),
2441 getMemBasePlusOffset(Op2, SrcOff, DAG, TLI),
2442 DAG.getSrcValue(I.getOperand(2), SrcOff));
2443 Chain = Value.getValue(1);
2445 DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
2446 getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
2447 DAG.getSrcValue(I.getOperand(1), DstOff));
2449 OutChains.push_back(Store);
2458 if (!OutChains.empty()) {
2459 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains));
2464 std::vector<SDOperand> Ops;
2465 Ops.push_back(getRoot());
2470 DAG.setRoot(DAG.getNode(Op, MVT::Other, Ops));
2473 //===----------------------------------------------------------------------===//
2474 // SelectionDAGISel code
2475 //===----------------------------------------------------------------------===//
2477 unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
2478 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
2481 void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
2482 // FIXME: we only modify the CFG to split critical edges. This
2483 // updates dom and loop info.
2487 /// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
2488 /// casting to the type of GEPI.
2489 static Value *InsertGEPComputeCode(Value *&V, BasicBlock *BB, Instruction *GEPI,
2490 Value *Ptr, Value *PtrOffset) {
2491 if (V) return V; // Already computed.
2493 BasicBlock::iterator InsertPt;
2494 if (BB == GEPI->getParent()) {
2495 // If insert into the GEP's block, insert right after the GEP.
2499 // Otherwise, insert at the top of BB, after any PHI nodes
2500 InsertPt = BB->begin();
2501 while (isa<PHINode>(InsertPt)) ++InsertPt;
2504 // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
2505 // BB so that there is only one value live across basic blocks (the cast
2507 if (CastInst *CI = dyn_cast<CastInst>(Ptr))
2508 if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
2509 Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
2511 // Add the offset, cast it to the right type.
2512 Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
2513 Ptr = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
2518 /// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
2519 /// selection, we want to be a bit careful about some things. In particular, if
2520 /// we have a GEP instruction that is used in a different block than it is
2521 /// defined, the addressing expression of the GEP cannot be folded into loads or
2522 /// stores that use it. In this case, decompose the GEP and move constant
2523 /// indices into blocks that use it.
2524 static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
2525 const TargetData &TD) {
2526 // If this GEP is only used inside the block it is defined in, there is no
2527 // need to rewrite it.
2528 bool isUsedOutsideDefBB = false;
2529 BasicBlock *DefBB = GEPI->getParent();
2530 for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
2532 if (cast<Instruction>(*UI)->getParent() != DefBB) {
2533 isUsedOutsideDefBB = true;
2537 if (!isUsedOutsideDefBB) return;
2539 // If this GEP has no non-zero constant indices, there is nothing we can do,
2541 bool hasConstantIndex = false;
2542 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
2543 E = GEPI->op_end(); OI != E; ++OI) {
2544 if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI))
2545 if (CI->getRawValue()) {
2546 hasConstantIndex = true;
2550 // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
2551 if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0))) return;
2553 // Otherwise, decompose the GEP instruction into multiplies and adds. Sum the
2554 // constant offset (which we now know is non-zero) and deal with it later.
2555 uint64_t ConstantOffset = 0;
2556 const Type *UIntPtrTy = TD.getIntPtrType();
2557 Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
2558 const Type *Ty = GEPI->getOperand(0)->getType();
2560 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
2561 E = GEPI->op_end(); OI != E; ++OI) {
2563 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2564 unsigned Field = cast<ConstantUInt>(Idx)->getValue();
2566 ConstantOffset += TD.getStructLayout(StTy)->MemberOffsets[Field];
2567 Ty = StTy->getElementType(Field);
2569 Ty = cast<SequentialType>(Ty)->getElementType();
2571 // Handle constant subscripts.
2572 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2573 if (CI->getRawValue() == 0) continue;
2575 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
2576 ConstantOffset += (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
2578 ConstantOffset+=TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
2582 // Ptr = Ptr + Idx * ElementSize;
2584 // Cast Idx to UIntPtrTy if needed.
2585 Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);
2587 uint64_t ElementSize = TD.getTypeSize(Ty);
2588 // Mask off bits that should not be set.
2589 ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
2590 Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize);
2592 // Multiply by the element size and add to the base.
2593 Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
2594 Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
2598 // Make sure that the offset fits in uintptr_t.
2599 ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
2600 Constant *PtrOffset = ConstantUInt::get(UIntPtrTy, ConstantOffset);
2602 // Okay, we have now emitted all of the variable index parts to the BB that
2603 // the GEP is defined in. Loop over all of the using instructions, inserting
2604 // an "add Ptr, ConstantOffset" into each block that uses it and update the
2605 // instruction to use the newly computed value, making GEPI dead. When the
2606 // user is a load or store instruction address, we emit the add into the user
2607 // block, otherwise we use a canonical version right next to the gep (these
2608 // won't be foldable as addresses, so we might as well share the computation).
2610 std::map<BasicBlock*,Value*> InsertedExprs;
2611 while (!GEPI->use_empty()) {
2612 Instruction *User = cast<Instruction>(GEPI->use_back());
2614 // If this use is not foldable into the addressing mode, use a version
2615 // emitted in the GEP block.
2617 if (!isa<LoadInst>(User) &&
2618 (!isa<StoreInst>(User) || User->getOperand(0) == GEPI)) {
2619 NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
2622 // Otherwise, insert the code in the User's block so it can be folded into
2623 // any users in that block.
2624 NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
2625 User->getParent(), GEPI,
2628 User->replaceUsesOfWith(GEPI, NewVal);
2631 // Finally, the GEP is dead, remove it.
2632 GEPI->eraseFromParent();
2635 bool SelectionDAGISel::runOnFunction(Function &Fn) {
2636 MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
2637 RegMap = MF.getSSARegMap();
2638 DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");
2640 // First, split all critical edges for PHI nodes with incoming values that are
2641 // constants, this way the load of the constant into a vreg will not be placed
2642 // into MBBs that are used some other way.
2644 // In this pass we also look for GEP instructions that are used across basic
2645 // blocks and rewrites them to improve basic-block-at-a-time selection.
2647 for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
2649 BasicBlock::iterator BBI;
2650 for (BBI = BB->begin(); (PN = dyn_cast<PHINode>(BBI)); ++BBI)
2651 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
2652 if (isa<Constant>(PN->getIncomingValue(i)))
2653 SplitCriticalEdge(PN->getIncomingBlock(i), BB);
2655 for (BasicBlock::iterator E = BB->end(); BBI != E; )
2656 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(BBI++))
2657 OptimizeGEPExpression(GEPI, TLI.getTargetData());
2660 FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
2662 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
2663 SelectBasicBlock(I, MF, FuncInfo);
2669 SDOperand SelectionDAGISel::
2670 CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) {
2671 SDOperand Op = SDL.getValue(V);
2672 assert((Op.getOpcode() != ISD::CopyFromReg ||
2673 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
2674 "Copy from a reg to the same reg!");
2676 // If this type is not legal, we must make sure to not create an invalid
2678 MVT::ValueType SrcVT = Op.getValueType();
2679 MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
2680 SelectionDAG &DAG = SDL.DAG;
2681 if (SrcVT == DestVT) {
2682 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
2683 } else if (SrcVT == MVT::Vector) {
2684 // Handle copies from generic vectors to registers.
2685 MVT::ValueType PTyElementVT, PTyLegalElementVT;
2686 unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(V->getType()),
2687 PTyElementVT, PTyLegalElementVT);
2689 // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT"
2690 // MVT::Vector type.
2691 Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op,
2692 DAG.getConstant(NE, MVT::i32),
2693 DAG.getValueType(PTyElementVT));
2695 // Loop over all of the elements of the resultant vector,
2696 // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then
2697 // copying them into output registers.
2698 std::vector<SDOperand> OutChains;
2699 SDOperand Root = SDL.getRoot();
2700 for (unsigned i = 0; i != NE; ++i) {
2701 SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT,
2702 Op, DAG.getConstant(i, MVT::i32));
2703 if (PTyElementVT == PTyLegalElementVT) {
2704 // Elements are legal.
2705 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
2706 } else if (PTyLegalElementVT > PTyElementVT) {
2707 // Elements are promoted.
2708 if (MVT::isFloatingPoint(PTyLegalElementVT))
2709 Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt);
2711 Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt);
2712 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
2714 // Elements are expanded.
2715 // The src value is expanded into multiple registers.
2716 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
2717 Elt, DAG.getConstant(0, MVT::i32));
2718 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
2719 Elt, DAG.getConstant(1, MVT::i32));
2720 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo));
2721 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi));
2724 return DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains);
2725 } else if (SrcVT < DestVT) {
2726 // The src value is promoted to the register.
2727 if (MVT::isFloatingPoint(SrcVT))
2728 Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
2730 Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
2731 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
2733 // The src value is expanded into multiple registers.
2734 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
2735 Op, DAG.getConstant(0, MVT::i32));
2736 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
2737 Op, DAG.getConstant(1, MVT::i32));
2738 Op = DAG.getCopyToReg(SDL.getRoot(), Reg, Lo);
2739 return DAG.getCopyToReg(Op, Reg+1, Hi);
2743 void SelectionDAGISel::
2744 LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
2745 std::vector<SDOperand> &UnorderedChains) {
2746 // If this is the entry block, emit arguments.
2747 Function &F = *BB->getParent();
2748 FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
2749 SDOperand OldRoot = SDL.DAG.getRoot();
2750 std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
2753 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
2755 if (!AI->use_empty()) {
2756 SDL.setValue(AI, Args[a]);
2758 // If this argument is live outside of the entry block, insert a copy from
2759 // whereever we got it to the vreg that other BB's will reference it as.
2760 if (FuncInfo.ValueMap.count(AI)) {
2762 CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
2763 UnorderedChains.push_back(Copy);
2767 // Next, if the function has live ins that need to be copied into vregs,
2768 // emit the copies now, into the top of the block.
2769 MachineFunction &MF = SDL.DAG.getMachineFunction();
2770 if (MF.livein_begin() != MF.livein_end()) {
2771 SSARegMap *RegMap = MF.getSSARegMap();
2772 const MRegisterInfo &MRI = *MF.getTarget().getRegisterInfo();
2773 for (MachineFunction::livein_iterator LI = MF.livein_begin(),
2774 E = MF.livein_end(); LI != E; ++LI)
2776 MRI.copyRegToReg(*MF.begin(), MF.begin()->end(), LI->second,
2777 LI->first, RegMap->getRegClass(LI->second));
2780 // Finally, if the target has anything special to do, allow it to do so.
2781 EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction());
2785 void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
2786 std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
2787 FunctionLoweringInfo &FuncInfo) {
2788 SelectionDAGLowering SDL(DAG, TLI, FuncInfo);
2790 std::vector<SDOperand> UnorderedChains;
2792 // Lower any arguments needed in this block if this is the entry block.
2793 if (LLVMBB == &LLVMBB->getParent()->front())
2794 LowerArguments(LLVMBB, SDL, UnorderedChains);
2796 BB = FuncInfo.MBBMap[LLVMBB];
2797 SDL.setCurrentBasicBlock(BB);
2799 // Lower all of the non-terminator instructions.
2800 for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
2804 // Ensure that all instructions which are used outside of their defining
2805 // blocks are available as virtual registers.
2806 for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
2807 if (!I->use_empty() && !isa<PHINode>(I)) {
2808 std::map<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I);
2809 if (VMI != FuncInfo.ValueMap.end())
2810 UnorderedChains.push_back(
2811 CopyValueToVirtualRegister(SDL, I, VMI->second));
2814 // Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
2815 // ensure constants are generated when needed. Remember the virtual registers
2816 // that need to be added to the Machine PHI nodes as input. We cannot just
2817 // directly add them, because expansion might result in multiple MBB's for one
2818 // BB. As such, the start of the BB might correspond to a different MBB than
2822 // Emit constants only once even if used by multiple PHI nodes.
2823 std::map<Constant*, unsigned> ConstantsOut;
2825 // Check successor nodes PHI nodes that expect a constant to be available from
2827 TerminatorInst *TI = LLVMBB->getTerminator();
2828 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
2829 BasicBlock *SuccBB = TI->getSuccessor(succ);
2830 MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();
2833 // At this point we know that there is a 1-1 correspondence between LLVM PHI
2834 // nodes and Machine PHI nodes, but the incoming operands have not been
2836 for (BasicBlock::iterator I = SuccBB->begin();
2837 (PN = dyn_cast<PHINode>(I)); ++I)
2838 if (!PN->use_empty()) {
2840 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
2841 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
2842 unsigned &RegOut = ConstantsOut[C];
2844 RegOut = FuncInfo.CreateRegForValue(C);
2845 UnorderedChains.push_back(
2846 CopyValueToVirtualRegister(SDL, C, RegOut));
2850 Reg = FuncInfo.ValueMap[PHIOp];
2852 assert(isa<AllocaInst>(PHIOp) &&
2853 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
2854 "Didn't codegen value into a register!??");
2855 Reg = FuncInfo.CreateRegForValue(PHIOp);
2856 UnorderedChains.push_back(
2857 CopyValueToVirtualRegister(SDL, PHIOp, Reg));
2861 // Remember that this register needs to added to the machine PHI node as
2862 // the input for this MBB.
2863 MVT::ValueType VT = TLI.getValueType(PN->getType());
2864 unsigned NumElements;
2865 if (VT != MVT::Vector)
2866 NumElements = TLI.getNumElements(VT);
2868 MVT::ValueType VT1,VT2;
2870 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
2873 for (unsigned i = 0, e = NumElements; i != e; ++i)
2874 PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
2877 ConstantsOut.clear();
2879 // Turn all of the unordered chains into one factored node.
2880 if (!UnorderedChains.empty()) {
2881 SDOperand Root = SDL.getRoot();
2882 if (Root.getOpcode() != ISD::EntryToken) {
2883 unsigned i = 0, e = UnorderedChains.size();
2884 for (; i != e; ++i) {
2885 assert(UnorderedChains[i].Val->getNumOperands() > 1);
2886 if (UnorderedChains[i].Val->getOperand(0) == Root)
2887 break; // Don't add the root if we already indirectly depend on it.
2891 UnorderedChains.push_back(Root);
2893 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, UnorderedChains));
2896 // Lower the terminator after the copies are emitted.
2897 SDL.visit(*LLVMBB->getTerminator());
2899 // Copy over any CaseBlock records that may now exist due to SwitchInst
2901 SwitchCases.clear();
2902 SwitchCases = SDL.SwitchCases;
2904 // Make sure the root of the DAG is up-to-date.
2905 DAG.setRoot(SDL.getRoot());
2908 void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) {
2909 // Run the DAG combiner in pre-legalize mode.
2912 DEBUG(std::cerr << "Lowered selection DAG:\n");
2915 // Second step, hack on the DAG until it only uses operations and types that
2916 // the target supports.
2919 DEBUG(std::cerr << "Legalized selection DAG:\n");
2922 // Run the DAG combiner in post-legalize mode.
2925 if (ViewISelDAGs) DAG.viewGraph();
2927 // Third, instruction select all of the operations to machine code, adding the
2928 // code to the MachineBasicBlock.
2929 InstructionSelectBasicBlock(DAG);
2931 DEBUG(std::cerr << "Selected machine code:\n");
2935 void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
2936 FunctionLoweringInfo &FuncInfo) {
2937 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
2939 SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
2942 // First step, lower LLVM code to some DAG. This DAG may use operations and
2943 // types that are not supported by the target.
2944 BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
2946 // Second step, emit the lowered DAG as machine code.
2947 CodeGenAndEmitDAG(DAG);
2950 // Next, now that we know what the last MBB the LLVM BB expanded is, update
2951 // PHI nodes in successors.
2952 if (SwitchCases.empty()) {
2953 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
2954 MachineInstr *PHI = PHINodesToUpdate[i].first;
2955 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
2956 "This is not a machine PHI node that we are updating!");
2957 PHI->addRegOperand(PHINodesToUpdate[i].second);
2958 PHI->addMachineBasicBlockOperand(BB);
2963 // If we generated any switch lowering information, build and codegen any
2964 // additional DAGs necessary.
2965 for(unsigned i = 0, e = SwitchCases.size(); i != e; ++i) {
2966 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
2968 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
2969 // Set the current basic block to the mbb we wish to insert the code into
2970 BB = SwitchCases[i].ThisBB;
2971 SDL.setCurrentBasicBlock(BB);
2973 SDL.visitSwitchCase(SwitchCases[i]);
2974 SDAG.setRoot(SDL.getRoot());
2975 CodeGenAndEmitDAG(SDAG);
2976 // Iterate over the phi nodes, if there is a phi node in a successor of this
2977 // block (for instance, the default block), then add a pair of operands to
2978 // the phi node for this block, as if we were coming from the original
2979 // BB before switch expansion.
2980 for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
2981 MachineInstr *PHI = PHINodesToUpdate[pi].first;
2982 MachineBasicBlock *PHIBB = PHI->getParent();
2983 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
2984 "This is not a machine PHI node that we are updating!");
2985 if (PHIBB == SwitchCases[i].LHSBB || PHIBB == SwitchCases[i].RHSBB) {
2986 PHI->addRegOperand(PHINodesToUpdate[pi].second);
2987 PHI->addMachineBasicBlockOperand(BB);
2993 //===----------------------------------------------------------------------===//
2994 /// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
2995 /// target node in the graph.
2996 void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) {
2997 if (ViewSchedDAGs) DAG.viewGraph();
2998 ScheduleDAG *SL = NULL;
3000 switch (ISHeuristic) {
3001 default: assert(0 && "Unrecognized scheduling heuristic");
3002 case defaultScheduling:
3003 if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency)
3004 SL = createSimpleDAGScheduler(noScheduling, DAG, BB);
3005 else /* TargetLowering::SchedulingForRegPressure */
3006 SL = createBURRListDAGScheduler(DAG, BB);
3009 SL = createBFS_DAGScheduler(DAG, BB);
3011 case simpleScheduling:
3012 SL = createSimpleDAGScheduler(false, DAG, BB);
3014 case simpleNoItinScheduling:
3015 SL = createSimpleDAGScheduler(true, DAG, BB);
3017 case listSchedulingBURR:
3018 SL = createBURRListDAGScheduler(DAG, BB);
3020 case listSchedulingTD:
3021 SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer());
3028 HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() {
3029 return new HazardRecognizer();
3032 /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
3033 /// by tblgen. Others should not call it.
3034 void SelectionDAGISel::
3035 SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) {
3036 std::vector<SDOperand> InOps;
3037 std::swap(InOps, Ops);
3039 Ops.push_back(InOps[0]); // input chain.
3040 Ops.push_back(InOps[1]); // input asm string.
3042 const char *AsmStr = cast<ExternalSymbolSDNode>(InOps[1])->getSymbol();
3043 unsigned i = 2, e = InOps.size();
3044 if (InOps[e-1].getValueType() == MVT::Flag)
3045 --e; // Don't process a flag operand if it is here.
3048 unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue();
3049 if ((Flags & 7) != 4 /*MEM*/) {
3050 // Just skip over this operand, copying the operands verbatim.
3051 Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1);
3052 i += (Flags >> 3) + 1;
3054 assert((Flags >> 3) == 1 && "Memory operand with multiple values?");
3055 // Otherwise, this is a memory operand. Ask the target to select it.
3056 std::vector<SDOperand> SelOps;
3057 if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) {
3058 std::cerr << "Could not match memory address. Inline asm failure!\n";
3062 // Add this to the output node.
3063 Ops.push_back(DAG.getConstant(4/*MEM*/ | (SelOps.size() << 3), MVT::i32));
3064 Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
3069 // Add the flag input back if present.
3070 if (e != InOps.size())
3071 Ops.push_back(InOps.back());