//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "isel"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineDebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <iostream>
#include <map>
using namespace llvm;
#ifndef NDEBUG
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
          cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
          cl::desc("Pop up a window to show sched dags as they are processed"));
#else
static const bool ViewISelDAGs = 0;
static const bool ViewSchedDAGs = 0;
#endif
namespace {
  cl::opt<SchedHeuristics>
  ISHeuristic(
    "sched",
    cl::desc("Choose scheduling style"),
    cl::init(defaultScheduling),
    cl::values(
      clEnumValN(defaultScheduling, "default",
                 "Target preferred scheduling style"),
      clEnumValN(noScheduling, "none",
                 "No scheduling: breadth first sequencing"),
      clEnumValN(simpleScheduling, "simple",
                 "Simple two pass scheduling: minimize critical path "
                 "and maximize processor utilization"),
      clEnumValN(simpleNoItinScheduling, "simple-noitin",
                 "Simple two pass scheduling: Same as simple "
                 "except using generic latency"),
      clEnumValN(listSchedulingBURR, "list-burr",
                 "Bottom up register reduction list scheduling"),
      clEnumValEnd));
} // end anonymous namespace
namespace llvm {

//===--------------------------------------------------------------------===//
/// FunctionLoweringInfo - This contains information that is global to a
/// function that is used when lowering a region of the function.
class FunctionLoweringInfo {
public:
  TargetLowering &TLI;
  Function &Fn;
  MachineFunction &MF;
  SSARegMap *RegMap;

  FunctionLoweringInfo(TargetLowering &TLI, Function &Fn, MachineFunction &MF);

  /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
  std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;

  /// ValueMap - Since we emit code for the function a basic block at a time,
  /// we must remember which virtual registers hold the values that are live
  /// across basic blocks.
  std::map<const Value*, unsigned> ValueMap;

  /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
  /// the entry block.  This allows the allocas to be efficiently referenced
  /// anywhere in the function.
  std::map<const AllocaInst*, int> StaticAllocaMap;

  unsigned MakeReg(MVT::ValueType VT) {
    return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
  }

  unsigned CreateRegForValue(const Value *V) {
    MVT::ValueType VT = TLI.getValueType(V->getType());
    // The common case is that we will only create one register for this
    // value.  If we have that case, create and return the virtual register.
    unsigned NV = TLI.getNumElements(VT);
    if (NV == 1) {
      // If we are promoting this value, pick the next largest supported type.
      return MakeReg(TLI.getTypeToTransformTo(VT));
    }

    // If this value is represented with multiple target registers, make sure
    // to create enough consecutive registers of the right (smaller) type.
    unsigned NT = VT-1;  // Find the type to use.
    while (TLI.getNumElements((MVT::ValueType)NT) != 1)
      --NT;

    unsigned R = MakeReg((MVT::ValueType)NT);
    for (unsigned i = 1; i != NV; ++i)
      MakeReg((MVT::ValueType)NT);
    return R;
  }

  unsigned InitializeRegForValue(const Value *V) {
    unsigned &R = ValueMap[V];
    assert(R == 0 && "Already initialized this value register!");
    return R = CreateRegForValue(V);
  }
};
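// For example, on a hypothetical 32-bit target where i64 is not a legal
// register type, CreateRegForValue on an i64 value sees NV == 2, steps NT
// down from i64 until it reaches i32 (the largest single-register type),
// and creates two consecutive i32 virtual registers, returning the first.
// Code that reads or writes such a value relies on that +1 adjacency (see
// the BUILD_PAIR emission in getValue and the Reg/Reg+1 copies below).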
/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
      return true;
  return false;
}

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true.
static bool isOnlyUsedInEntryBlock(Argument *A) {
  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry)
      return false;  // Use not in entry block.
  return true;
}
FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
                                           Function &fn, MachineFunction &mf)
    : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers.  This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::iterator BB = Fn.begin(), EB = Fn.end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
                   AI->getAlignment());

        // If the alignment of the value is smaller than the size of the value,
        // and if the size of the value is particularly small (<= 8 bytes),
        // round up to the size of the value for potentially better performance.
        //
        // FIXME: This could be made better with a preferred alignment hook in
        // TargetData.  It serves primarily to 8-byte align doubles for X86.
        if (Align < TySize && TySize <= 8) Align = TySize;
        TySize *= CUI->getValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
      }

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF.getBasicBlockList().push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        unsigned NumElements =
          TLI.getNumElements(TLI.getValueType(PN->getType()));
        unsigned PHIReg = ValueMap[PN];
        assert(PHIReg && "PHI node does not have an assigned virtual register!");
        for (unsigned i = 0; i != NumElements; ++i)
          BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
      }
  }
}
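// For example, an LLVM PHI node of type long on a 32-bit target expands to
// two machine registers, so the loop above creates two PHI MachineInstrs
// for it (one per element); SelectBasicBlock later fills in their incoming
// (register, predecessor-MBB) operand pairs via PHINodesToUpdate.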
//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
/// Also, targets can overload any lowering method.
///
class SelectionDAGLowering {
  MachineBasicBlock *CurMBB;

  std::map<const Value*, SDOperand> NodeMap;

  /// PendingLoads - Loads are not emitted to the program immediately.  We bunch
  /// them up and then emit token factor nodes when possible.  This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
  std::vector<SDOperand> PendingLoads;

public:
  // TLI - This is information that describes the available target features we
  // need for lowering.  This indicates when operations are unavailable,
  // implemented with a libcall, etc.
  TargetLowering &TLI;
  SelectionDAG &DAG;
  const TargetData &TD;

  /// FuncInfo - Information about the function as a whole.
  FunctionLoweringInfo &FuncInfo;

  SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
                       FunctionLoweringInfo &funcinfo)
    : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
      FuncInfo(funcinfo) {
  }

  /// getRoot - Return the current virtual root of the Selection DAG.
  SDOperand getRoot() {
    if (PendingLoads.empty())
      return DAG.getRoot();

    if (PendingLoads.size() == 1) {
      SDOperand Root = PendingLoads[0];
      DAG.setRoot(Root);
      PendingLoads.clear();
      return Root;
    }

    // Otherwise, we have to make a token factor node.
    SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, PendingLoads);
    PendingLoads.clear();
    DAG.setRoot(Root);
    return Root;
  }
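  // For example, if two non-volatile loads are pending when getRoot() is
  // called for a store, the loads' chains are merged into a single
  // TokenFactor node and the store is chained on that: the two loads remain
  // unordered with respect to each other, but both complete before the store.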
  void visit(Instruction &I) { visit(I.getOpcode(), I); }

  void visit(unsigned Opcode, User &I) {
    switch (Opcode) {
    default: assert(0 && "Unknown instruction type encountered!");
             abort();
      // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
    }
  }

  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }

  SDOperand getIntPtrConstant(uint64_t Val) {
    return DAG.getConstant(Val, TLI.getPointerTy());
  }
  SDOperand getValue(const Value *V) {
    SDOperand &N = NodeMap[V];
    if (N.Val) return N;

    const Type *VTy = V->getType();
    MVT::ValueType VT = TLI.getValueType(VTy);
    if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V)))
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
        visit(CE->getOpcode(), *CE);
        assert(N.Val && "visit didn't populate the ValueMap!");
        return N;
      } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
        return N = DAG.getGlobalAddress(GV, VT);
      } else if (isa<ConstantPointerNull>(C)) {
        return N = DAG.getConstant(0, TLI.getPointerTy());
      } else if (isa<UndefValue>(C)) {
        return N = DAG.getNode(ISD::UNDEF, VT);
      } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
        return N = DAG.getConstantFP(CFP->getValue(), VT);
      } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
        unsigned NumElements = PTy->getNumElements();
        MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
        MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

        // Now that we know the number and type of the elements, push a
        // Constant or ConstantFP node onto the ops list for each element of
        // the packed constant.
        std::vector<SDOperand> Ops;
        if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
          if (MVT::isFloatingPoint(PVT)) {
            for (unsigned i = 0; i != NumElements; ++i) {
              const ConstantFP *El = cast<ConstantFP>(CP->getOperand(i));
              Ops.push_back(DAG.getConstantFP(El->getValue(), PVT));
            }
          } else {
            for (unsigned i = 0; i != NumElements; ++i) {
              const ConstantIntegral *El =
                cast<ConstantIntegral>(CP->getOperand(i));
              Ops.push_back(DAG.getConstant(El->getRawValue(), PVT));
            }
          }
        } else {
          assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
          SDOperand Op;
          if (MVT::isFloatingPoint(PVT))
            Op = DAG.getConstantFP(0, PVT);
          else
            Op = DAG.getConstant(0, PVT);
          Ops.assign(NumElements, Op);
        }

        // Handle the case where we have a 1-element vector, in which
        // case we want to immediately turn it into a scalar constant.
        if (Ops.size() == 1) {
          return N = Ops[0];
        } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
          return N = DAG.getNode(ISD::ConstantVec, TVT, Ops);
        } else {
          // If the packed type isn't legal, then create a ConstantVec node with
          // generic Vector type instead.
          return N = DAG.getNode(ISD::ConstantVec, MVT::Vector, Ops);
        }
      } else {
        // Canonicalize all constant ints to be unsigned.
        return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(), VT);
      }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
      std::map<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
    }

    std::map<const Value*, unsigned>::const_iterator VMI =
      FuncInfo.ValueMap.find(V);
    assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");

    unsigned InReg = VMI->second;

    // If this type is not legal, make it so now.
    MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);

    N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
    if (DestVT < VT) {
      // Source must be expanded.  This input value is actually coming from the
      // register pair VMI->second and VMI->second+1.
      N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
                      DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
    } else {
      if (DestVT > VT) { // Promotion case
        if (MVT::isFloatingPoint(VT))
          N = DAG.getNode(ISD::FP_ROUND, VT, N);
        else
          N = DAG.getNode(ISD::TRUNCATE, VT, N);
      }
    }

    return N;
  }
  const SDOperand &setValue(const Value *V, SDOperand NewN) {
    SDOperand &N = NodeMap[V];
    assert(N.Val == 0 && "Already set a value for this node!");
    return N = NewN;
  }
  // Terminator instructions.
  void visitRet(ReturnInst &I);
  void visitBr(BranchInst &I);
  void visitUnreachable(UnreachableInst &I) { /* noop */ }

  // These all get lowered before this pass.
  void visitExtractElement(ExtractElementInst &I) { assert(0 && "TODO"); }
  void visitInsertElement(InsertElementInst &I) { assert(0 && "TODO"); }
  void visitSwitch(SwitchInst &I) { assert(0 && "TODO"); }
  void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
  void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }

  void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp);
  void visitShift(User &I, unsigned Opcode);
  void visitAdd(User &I) {
    visitBinary(I, ISD::ADD, ISD::FADD, ISD::VADD);
  }
  void visitSub(User &I);
  void visitMul(User &I) {
    visitBinary(I, ISD::MUL, ISD::FMUL, ISD::VMUL);
  }
  void visitDiv(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I, Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV, 0);
  }
  void visitRem(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0);
  }
  void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, 0); }
  void visitOr (User &I) { visitBinary(I, ISD::OR,  0, 0); }
  void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, 0); }
  void visitShl(User &I) { visitShift(I, ISD::SHL); }
  void visitShr(User &I) {
    visitShift(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);
  }

  void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc);
  void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ); }
  void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE); }
  void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE); }
  void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE); }
  void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); }
  void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); }

  void visitGetElementPtr(User &I);
  void visitCast(User &I);
  void visitSelect(User &I);

  void visitMalloc(MallocInst &I);
  void visitFree(FreeInst &I);
  void visitAlloca(AllocaInst &I);
  void visitLoad(LoadInst &I);
  void visitStore(StoreInst &I);
  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
  void visitCall(CallInst &I);
  void visitInlineAsm(CallInst &I);
  const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);

  void visitVAStart(CallInst &I);
  void visitVAArg(VAArgInst &I);
  void visitVAEnd(CallInst &I);
  void visitVACopy(CallInst &I);
  void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);

  void visitMemIntrinsic(CallInst &I, unsigned Op);

  void visitUserOp1(Instruction &I) {
    assert(0 && "UserOp1 should not exist at instruction selection time!");
  }
  void visitUserOp2(Instruction &I) {
    assert(0 && "UserOp2 should not exist at instruction selection time!");
  }
};

} // end namespace llvm
void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
    return;
  }

  std::vector<SDOperand> NewValues;
  NewValues.push_back(getRoot());
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SDOperand RetOp = getValue(I.getOperand(i));

    // If this is an integer return value, we need to promote it ourselves to
    // the full width of a register, since LegalizeOp will use ANY_EXTEND rather
    // than sign/zero extend.
    if (MVT::isInteger(RetOp.getValueType()) &&
        RetOp.getValueType() < MVT::i64) {
      MVT::ValueType TmpVT;
      if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
        TmpVT = TLI.getTypeToTransformTo(MVT::i32);
      else
        TmpVT = MVT::i32;

      if (I.getOperand(i)->getType()->isSigned())
        RetOp = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, RetOp);
      else
        RetOp = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, RetOp);
    }
    NewValues.push_back(RetOp);
  }
  DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, NewValues));
}
void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Succ0MBB)));
  } else {
    MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

    SDOperand Cond = getValue(I.getCondition());
    if (Succ1MBB == NextBlock) {
      // If the condition is false, fall through.  This means we should branch
      // if the condition is true to Succ #0.
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
                              Cond, DAG.getBasicBlock(Succ0MBB)));
    } else if (Succ0MBB == NextBlock) {
      // If the condition is true, fall through.  This means we should branch if
      // the condition is false to Succ #1.  Invert the condition first.
      SDOperand True = DAG.getConstant(1, Cond.getValueType());
      Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
                              Cond, DAG.getBasicBlock(Succ1MBB)));
    } else {
      std::vector<SDOperand> Ops;
      Ops.push_back(getRoot());
      Ops.push_back(Cond);
      Ops.push_back(DAG.getBasicBlock(Succ0MBB));
      Ops.push_back(DAG.getBasicBlock(Succ1MBB));
      DAG.setRoot(DAG.getNode(ISD::BRCONDTWOWAY, MVT::Other, Ops));
    }
  }
}
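// For example, given "br bool %c, label %then, label %else" where %else is
// the next block in layout order, a single BRCOND(chain, %c, then-MBB) node
// suffices and the false path falls through.  When neither successor is the
// fall-through block, BRCONDTWOWAY carries both destinations so the target
// can emit a conditional branch followed by an unconditional one.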
void SelectionDAGLowering::visitSub(User &I) {
  // -0.0 - X --> fneg
  if (I.getType()->isFloatingPoint()) {
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
      if (CFP->isExactlyValue(-0.0)) {
        SDOperand Op2 = getValue(I.getOperand(1));
        setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
        return;
      }
  }
  visitBinary(I, ISD::SUB, ISD::FSUB, ISD::VSUB);
}
void SelectionDAGLowering::visitBinary(User &I, unsigned IntOp, unsigned FPOp,
                                       unsigned VecOp) {
  const Type *Ty = I.getType();
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  if (Ty->isIntegral()) {
    setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
  } else if (Ty->isFloatingPoint()) {
    setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
  } else {
    const PackedType *PTy = cast<PackedType>(Ty);
    unsigned NumElements = PTy->getNumElements();
    MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
    MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

    // Immediately scalarize packed types containing only one element, so that
    // the Legalize pass does not have to deal with them.  Similarly, if the
    // abstract vector is going to turn into one that the target natively
    // supports, generate that type now so that Legalize doesn't have to deal
    // with that either.  These steps ensure that Legalize only has to handle
    // vector types in its Expand case.
    unsigned Opc = MVT::isFloatingPoint(PVT) ? FPOp : IntOp;
    if (NumElements == 1) {
      setValue(&I, DAG.getNode(Opc, PVT, Op1, Op2));
    } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
      setValue(&I, DAG.getNode(Opc, TVT, Op1, Op2));
    } else {
      SDOperand Num = DAG.getConstant(NumElements, MVT::i32);
      SDOperand Typ = DAG.getValueType(PVT);
      setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
    }
  }
}
void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);

  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
}
void SelectionDAGLowering::visitSetCC(User &I, ISD::CondCode SignedOpcode,
                                      ISD::CondCode UnsignedOpcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode = SignedOpcode;
  if (I.getOperand(0)->getType()->isUnsigned())
    Opcode = UnsignedOpcode;
  setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
}
void SelectionDAGLowering::visitSelect(User &I) {
  SDOperand Cond     = getValue(I.getOperand(0));
  SDOperand TrueVal  = getValue(I.getOperand(1));
  SDOperand FalseVal = getValue(I.getOperand(2));
  setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
                           TrueVal, FalseVal));
}
void SelectionDAGLowering::visitCast(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType SrcTy = TLI.getValueType(I.getOperand(0)->getType());
  MVT::ValueType DestTy = TLI.getValueType(I.getType());

  if (N.getValueType() == DestTy) {
    setValue(&I, N);  // noop cast.
  } else if (DestTy == MVT::i1) {
    // Cast to bool is a comparison against zero, not truncation to zero.
    SDOperand Zero = isInteger(SrcTy) ? DAG.getConstant(0, N.getValueType()) :
                                        DAG.getConstantFP(0.0, N.getValueType());
    setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
  } else if (isInteger(SrcTy)) {
    if (isInteger(DestTy)) {        // Int -> Int cast
      if (DestTy < SrcTy)           // Truncating cast?
        setValue(&I, DAG.getNode(ISD::TRUNCATE, DestTy, N));
      else if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestTy, N));
    } else {                        // Int -> FP cast
      if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestTy, N));
    }
  } else {
    assert(isFloatingPoint(SrcTy) && "Unknown value type!");
    if (isFloatingPoint(DestTy)) {  // FP -> FP cast
      if (DestTy < SrcTy)           // Rounding cast?
        setValue(&I, DAG.getNode(ISD::FP_ROUND, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestTy, N));
    } else {                        // FP -> Int cast.
      if (I.getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestTy, N));
    }
  }
}
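// For example, "cast sbyte %x to int" selects SIGN_EXTEND because the source
// type is signed, "cast ubyte %x to int" selects ZERO_EXTEND, "cast int %x
// to sbyte" selects TRUNCATE, and "cast int %x to bool" becomes
// SETNE(%x, 0) rather than a truncation.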
void SelectionDAGLowering::visitGetElementPtr(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  const Type *Ty = I.getOperand(0)->getType();
  const Type *UIntPtrTy = TD.getIntPtrType();

  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantUInt>(Idx)->getValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offset = TD.getStructLayout(StTy)->MemberOffsets[Field];
        N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                        getIntPtrConstant(Offset));
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getRawValue() == 0) continue;

        uint64_t Offs;
        if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
          Offs = (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
        else
          Offs = TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeSize(Ty);
      SDOperand IdxN = getValue(Idx);

      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.
      if (IdxN.getValueType() < N.getValueType()) {
        if (Idx->getType()->isSigned())
          IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
        else
          IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN);
      } else if (IdxN.getValueType() > N.getValueType())
        IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);

      // If this is a multiply by a power of two, turn it into a shl
      // immediately.  This is a very common case.
      if (isPowerOf2_64(ElementSize)) {
        unsigned Amt = Log2_64(ElementSize);
        IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
                           DAG.getConstant(Amt, TLI.getShiftAmountTy()));
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
        continue;
      }

      SDOperand Scale = getIntPtrConstant(ElementSize);
      IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
      N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
    }
  }
  setValue(&I, N);
}
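// For example, "getelementptr int* %P, int %i" with 4-byte ints becomes
//
//   ADD(%P, SHL(%i, 2))
//
// via the power-of-two fast path above (after extending or truncating %i to
// pointer width), while "getelementptr %st* %S, int 0, uint 1" folds away
// entirely into ADD(%S, MemberOffsets[1]).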
void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  const Type *Ty = I.getAllocatedType();
  uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
  unsigned Align = std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
                            I.getAlignment());

  SDOperand AllocSize = getValue(I.getArraySize());
  MVT::ValueType IntPtr = TLI.getPointerTy();
  if (IntPtr < AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
  else if (IntPtr > AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);

  AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
                          getIntPtrConstant(TySize));

  // Handle alignment.  If the requested alignment is less than or equal to the
  // stack alignment, ignore it and round the size of the allocation up to the
  // stack alignment size.  If the size is greater than the stack alignment, we
  // note this in the DYNAMIC_STACKALLOC node.
  unsigned StackAlign =
    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
  if (Align <= StackAlign) {
    Align = 0;

    // Add SA-1 to the size.
    AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(StackAlign-1));
    // Mask out the low bits for alignment purposes.
    AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(~(uint64_t)(StackAlign-1)));
  }

  std::vector<MVT::ValueType> VTs;
  VTs.push_back(AllocSize.getValueType());
  VTs.push_back(MVT::Other);
  std::vector<SDOperand> Ops;
  Ops.push_back(getRoot());
  Ops.push_back(AllocSize);
  Ops.push_back(getIntPtrConstant(Align));
  SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, Ops);
  DAG.setRoot(setValue(&I, DSA).getValue(1));

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
}
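// For example, with a 16-byte stack alignment, a dynamic alloca of 20 bytes
// whose requested alignment is <= 16 has its size rounded up in the DAG:
//
//   size' = (20 + 15) & ~15 = 32
//
// and DYNAMIC_STACKALLOC is emitted with an alignment operand of 0, meaning
// "the stack alignment suffices".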
/// getStringValue - Turn an LLVM constant pointer that eventually points to a
/// global into a string value.  Return an empty string if we can't do it.
///
static std::string getStringValue(Value *V, unsigned Offset = 0) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
    if (GV->hasInitializer() && isa<ConstantArray>(GV->getInitializer())) {
      ConstantArray *Init = cast<ConstantArray>(GV->getInitializer());
      if (Init->isString()) {
        std::string Result = Init->getAsString();
        if (Offset < Result.size()) {
          // If we are pointing INTO the string, erase the beginning...
          Result.erase(Result.begin(), Result.begin()+Offset);

          // Take off the null terminator, and any string fragments after it.
          std::string::size_type NullPos = Result.find_first_of((char)0);
          if (NullPos != std::string::npos)
            Result.erase(Result.begin()+NullPos, Result.end());
          return Result;
        }
      }
    }
  } else if (Constant *C = dyn_cast<Constant>(V)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return getStringValue(GV, Offset);
    else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        // Turn a gep into the specified offset.
        if (CE->getNumOperands() == 3 &&
            cast<Constant>(CE->getOperand(1))->isNullValue() &&
            isa<ConstantInt>(CE->getOperand(2))) {
          return getStringValue(CE->getOperand(0),
                   Offset+cast<ConstantInt>(CE->getOperand(2))->getRawValue());
        }
      }
    }
  }
  return "";
}
void SelectionDAGLowering::visitLoad(LoadInst &I) {
  SDOperand Ptr = getValue(I.getOperand(0));

  SDOperand Root;
  if (I.isVolatile())
    Root = getRoot();
  else {
    // Do not serialize non-volatile loads against each other.
    Root = DAG.getRoot();
  }

  const Type *Ty = I.getType();
  SDOperand L;

  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    unsigned NumElements = PTy->getNumElements();
    MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
    MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

    // Immediately scalarize packed types containing only one element, so that
    // the Legalize pass does not have to deal with them.
    if (NumElements == 1) {
      L = DAG.getLoad(PVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
    } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
      L = DAG.getLoad(TVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
    } else {
      L = DAG.getVecLoad(NumElements, PVT, Root, Ptr,
                         DAG.getSrcValue(I.getOperand(0)));
    }
  } else {
    L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr,
                    DAG.getSrcValue(I.getOperand(0)));
  }
  setValue(&I, L);

  if (I.isVolatile())
    DAG.setRoot(L.getValue(1));
  else
    PendingLoads.push_back(L.getValue(1));
}
void SelectionDAGLowering::visitStore(StoreInst &I) {
  Value *SrcV = I.getOperand(0);
  SDOperand Src = getValue(SrcV);
  SDOperand Ptr = getValue(I.getOperand(1));
  DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
                          DAG.getSrcValue(I.getOperand(1))));
}
/// visitIntrinsicCall - Lower the call to the specified intrinsic function.  If
/// we want to emit this as a call to a named external function, return the
/// name; otherwise lower it and return null.
const char *
SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
  switch (Intrinsic) {
  case Intrinsic::vastart:  visitVAStart(I); return 0;
  case Intrinsic::vaend:    visitVAEnd(I); return 0;
  case Intrinsic::vacopy:   visitVACopy(I); return 0;
  case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0;
  case Intrinsic::frameaddress:  visitFrameReturnAddress(I, true); return 0;
  case Intrinsic::setjmp:
    // "_setjmp"+1 is "setjmp": skip the underscore if the target does not use
    // the underscore-prefixed form.
    return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
  case Intrinsic::longjmp:
    return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
  case Intrinsic::memcpy:  visitMemIntrinsic(I, ISD::MEMCPY); return 0;
  case Intrinsic::memset:  visitMemIntrinsic(I, ISD::MEMSET); return 0;
  case Intrinsic::memmove: visitMemIntrinsic(I, ISD::MEMMOVE); return 0;

  case Intrinsic::readport:
  case Intrinsic::readio: {
    std::vector<MVT::ValueType> VTs;
    VTs.push_back(TLI.getValueType(I.getType()));
    VTs.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(getRoot());
    Ops.push_back(getValue(I.getOperand(1)));
    SDOperand Tmp = DAG.getNode(Intrinsic == Intrinsic::readport ?
                                ISD::READPORT : ISD::READIO, VTs, Ops);

    setValue(&I, Tmp);
    DAG.setRoot(Tmp.getValue(1));
    return 0;
  }
  case Intrinsic::writeport:
  case Intrinsic::writeio:
    DAG.setRoot(DAG.getNode(Intrinsic == Intrinsic::writeport ?
                            ISD::WRITEPORT : ISD::WRITEIO, MVT::Other,
                            getRoot(), getValue(I.getOperand(1)),
                            getValue(I.getOperand(2))));
    return 0;

  case Intrinsic::dbg_stoppoint: {
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_debugger_stop";

    std::string fname = "<unknown>";
    std::vector<SDOperand> Ops;

    // Input chain.
    Ops.push_back(getRoot());

    // Line number.
    Ops.push_back(getValue(I.getOperand(2)));

    // Column.
    Ops.push_back(getValue(I.getOperand(3)));

    // filename/working dir
    // Pull the filename out of the compilation unit.
    const GlobalVariable *cunit = dyn_cast<GlobalVariable>(I.getOperand(4));
    if (cunit && cunit->hasInitializer()) {
      if (ConstantStruct *CS =
            dyn_cast<ConstantStruct>(cunit->getInitializer())) {
        if (CS->getNumOperands() > 0) {
          Ops.push_back(DAG.getString(getStringValue(CS->getOperand(3))));
          Ops.push_back(DAG.getString(getStringValue(CS->getOperand(4))));
        }
      }
    }

    if (Ops.size() == 5) // Found filename/workingdir.
      DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops));
    setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;
  }
  case Intrinsic::dbg_region_start:
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_dbg_region_start";
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;
  case Intrinsic::dbg_region_end:
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_dbg_region_end";
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;
  case Intrinsic::dbg_func_start:
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_dbg_subprogram";
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;
  case Intrinsic::dbg_declare:
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;

  case Intrinsic::isunordered_f32:
  case Intrinsic::isunordered_f64:
    setValue(&I, DAG.getSetCC(MVT::i1, getValue(I.getOperand(1)),
                              getValue(I.getOperand(2)), ISD::SETUO));
    return 0;

  case Intrinsic::sqrt_f32:
  case Intrinsic::sqrt_f64:
    setValue(&I, DAG.getNode(ISD::FSQRT,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::pcmarker: {
    SDOperand Tmp = getValue(I.getOperand(1));
    DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
    return 0;
  }
  case Intrinsic::readcyclecounter: {
    std::vector<MVT::ValueType> VTs;
    VTs.push_back(MVT::i64);
    VTs.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(getRoot());
    SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, VTs, Ops);
    setValue(&I, Tmp);
    DAG.setRoot(Tmp.getValue(1));
    return 0;
  }
  case Intrinsic::bswap_i16:
  case Intrinsic::bswap_i32:
  case Intrinsic::bswap_i64:
    setValue(&I, DAG.getNode(ISD::BSWAP,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::cttz_i8:
  case Intrinsic::cttz_i16:
  case Intrinsic::cttz_i32:
  case Intrinsic::cttz_i64:
    setValue(&I, DAG.getNode(ISD::CTTZ,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::ctlz_i8:
  case Intrinsic::ctlz_i16:
  case Intrinsic::ctlz_i32:
  case Intrinsic::ctlz_i64:
    setValue(&I, DAG.getNode(ISD::CTLZ,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::ctpop_i8:
  case Intrinsic::ctpop_i16:
  case Intrinsic::ctpop_i32:
  case Intrinsic::ctpop_i64:
    setValue(&I, DAG.getNode(ISD::CTPOP,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::stacksave: {
    std::vector<MVT::ValueType> VTs;
    VTs.push_back(TLI.getPointerTy());
    VTs.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(getRoot());
    SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, VTs, Ops);
    setValue(&I, Tmp);
    DAG.setRoot(Tmp.getValue(1));
    return 0;
  }
  case Intrinsic::stackrestore: {
    SDOperand Tmp = getValue(I.getOperand(1));
    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp));
    return 0;
  }
  case Intrinsic::prefetch:
    // FIXME: Currently discarding prefetches.
    return 0;
  default:
    assert(0 && "This intrinsic is not implemented yet!");
    return 0;
  }
}
void SelectionDAGLowering::visitCall(CallInst &I) {
  const char *RenameFn = 0;
  if (Function *F = I.getCalledFunction()) {
    if (F->isExternal())
      if (unsigned IID = F->getIntrinsicID()) {
        RenameFn = visitIntrinsicCall(I, IID);
        if (!RenameFn)
          return;
      } else {    // Not an LLVM intrinsic.
        const std::string &Name = F->getName();
        if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType()) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
            return;
          }
        } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType() &&
              TLI.isOperationLegal(ISD::FSIN,
                                 TLI.getValueType(I.getOperand(1)->getType()))) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
            return;
          }
        } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType() &&
              TLI.isOperationLegal(ISD::FCOS,
                                 TLI.getValueType(I.getOperand(1)->getType()))) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
            return;
          }
        }
      }
  } else if (isa<InlineAsm>(I.getOperand(0))) {
    visitInlineAsm(I);
    return;
  }

  SDOperand Callee;
  if (!RenameFn)
    Callee = getValue(I.getOperand(0));
  else
    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.reserve(I.getNumOperands());
  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
    Value *Arg = I.getOperand(i);
    SDOperand ArgNode = getValue(Arg);
    Args.push_back(std::make_pair(ArgNode, Arg->getType()));
  }

  const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(),
                    I.isTailCall(), Callee, Args, DAG);
  if (I.getType() != Type::VoidTy)
    setValue(&I, Result.first);
  DAG.setRoot(Result.second);
}
/// visitInlineAsm - Handle a call to an InlineAsm object.
///
void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
  InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));

  SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
                                                 MVT::Other);

  // Note, we treat inline asms both with and without side-effects as the same.
  // If an inline asm doesn't have side effects and doesn't access memory, we
  // could choose not to chain it.
  bool hasSideEffects = IA->hasSideEffects();

  std::vector<std::pair<InlineAsm::ConstraintPrefix, std::string> >
    Constraints = IA->ParseConstraints();

  /// AsmNodeOperands - A list of pairs.  The first element is a register, the
  /// second is a bitfield where bit #0 is set if it is a use and bit #1 is set
  /// if it is a def of that register.
  std::vector<SDOperand> AsmNodeOperands;
  AsmNodeOperands.push_back(SDOperand());  // reserve space for input chain
  AsmNodeOperands.push_back(AsmStr);

  SDOperand Chain = getRoot();
  SDOperand Flag;

  // FIXME: input copies.

  // Finish up input operands.
  AsmNodeOperands[0] = Chain;
  if (Flag.Val) AsmNodeOperands.push_back(Flag);

  std::vector<MVT::ValueType> VTs;
  VTs.push_back(MVT::Other);
  VTs.push_back(MVT::Flag);
  Chain = DAG.getNode(ISD::INLINEASM, VTs, AsmNodeOperands);
  Flag = Chain.getValue(1);

  // FIXME: Copies out of registers here, setValue(CI).

  DAG.setRoot(Chain);
}
void SelectionDAGLowering::visitMalloc(MallocInst &I) {
  SDOperand Src = getValue(I.getOperand(0));

  MVT::ValueType IntPtr = TLI.getPointerTy();

  if (IntPtr < Src.getValueType())
    Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
  else if (IntPtr > Src.getValueType())
    Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);

  // Scale the source by the type size.
  uint64_t ElementSize = TD.getTypeSize(I.getType()->getElementType());
  Src = DAG.getNode(ISD::MUL, Src.getValueType(),
                    Src, getIntPtrConstant(ElementSize));

  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(Src, TLI.getTargetData().getIntPtrType()));

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
                    DAG.getExternalSymbol("malloc", IntPtr),
                    Args, DAG);
  setValue(&I, Result.first);  // Pointers always fit in registers
  DAG.setRoot(Result.second);
}
void SelectionDAGLowering::visitFree(FreeInst &I) {
  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(getValue(I.getOperand(0)),
                                TLI.getTargetData().getIntPtrType()));
  MVT::ValueType IntPtr = TLI.getPointerTy();
  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
                    DAG.getExternalSymbol("free", IntPtr), Args, DAG);
  DAG.setRoot(Result.second);
}
// InsertAtEndOfBasicBlock - This method should be implemented by targets that
// mark instructions with the 'usesCustomDAGSchedInserter' flag.  These
// instructions are special in various ways, which require special support to
// insert.  The specified MachineInstr is created but not inserted into any
// basic blocks, and the scheduler passes ownership of it to this method.
MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                                           MachineBasicBlock *MBB) {
  std::cerr << "If a target marks an instruction with "
               "'usesCustomDAGSchedInserter', it must implement "
               "TargetLowering::InsertAtEndOfBasicBlock!\n";
  abort();
  return 0;
}
void SelectionDAGLowering::visitVAStart(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(1))));
}

void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
  SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
                             getValue(I.getOperand(0)),
                             DAG.getSrcValue(I.getOperand(0)));
  setValue(&I, V);
  DAG.setRoot(V.getValue(1));
}

void SelectionDAGLowering::visitVAEnd(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(1))));
}

void SelectionDAGLowering::visitVACopy(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          getValue(I.getOperand(2)),
                          DAG.getSrcValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(2))));
}
// It is always conservatively correct for llvm.returnaddress and
// llvm.frameaddress to return 0.
std::pair<SDOperand, SDOperand>
TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
                                        unsigned Depth, SelectionDAG &DAG) {
  return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
}

SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  assert(0 && "LowerOperation not implemented for this target!");
  abort();
  return SDOperand();
}

void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
  unsigned Depth = (unsigned)cast<ConstantUInt>(I.getOperand(1))->getValue();
  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
  setValue(&I, Result.first);
  DAG.setRoot(Result.second);
}
void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
#if 0  // In-progress expansion into inline stores; disabled until finished.
  // If the size of the cpy/move/set is constant (known)
  if (ConstantUInt* op3 = dyn_cast<ConstantUInt>(I.getOperand(3))) {
    uint64_t size = op3->getValue();
    switch (Op) {
    case ISD::MEMSET:
      if (size <= TLI.getMaxStoresPerMemSet()) {
        if (ConstantUInt* op4 = dyn_cast<ConstantUInt>(I.getOperand(4))) {
          uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
          uint64_t align = op4->getValue();
          while (size > align) {
            size -= align;
          }
          Value *SrcV = I.getOperand(0);
          SDOperand Src = getValue(SrcV);
          SDOperand Ptr = getValue(I.getOperand(1));
          DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
                                  DAG.getSrcValue(I.getOperand(1))));
        }
        break;
      }
      break; // don't do this optimization, use a normal memset
    case ISD::MEMMOVE:
    case ISD::MEMCPY:
      break; // FIXME: not implemented yet
    }
  }
#endif

  // Non-optimized version
  std::vector<SDOperand> Ops;
  Ops.push_back(getRoot());
  Ops.push_back(getValue(I.getOperand(1)));
  Ops.push_back(getValue(I.getOperand(2)));
  Ops.push_back(getValue(I.getOperand(3)));
  Ops.push_back(getValue(I.getOperand(4)));
  DAG.setRoot(DAG.getNode(Op, MVT::Other, Ops));
}
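// In the fallback path above, e.g. a call to llvm.memcpy(%dst, %src, %len,
// %align) becomes a single MEMCPY node with operands
// (chain, %dst, %src, %len, %align); Legalize later turns that node into a
// call to the C library function or into whatever sequence the target
// prefers.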
//===----------------------------------------------------------------------===//
// SelectionDAGISel code
//===----------------------------------------------------------------------===//

unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
  return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
}

void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
  // FIXME: we only modify the CFG to split critical edges.  This
  // updates dom and loop info.
}
/// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
/// casting to the type of GEPI.
static Value *InsertGEPComputeCode(Value *&V, BasicBlock *BB, Instruction *GEPI,
                                   Value *Ptr, Value *PtrOffset) {
  if (V) return V;   // Already computed.

  BasicBlock::iterator InsertPt;
  if (BB == GEPI->getParent()) {
    // If inserting into the GEP's block, insert right after the GEP.
    InsertPt = GEPI;
    ++InsertPt;
  } else {
    // Otherwise, insert at the top of BB, after any PHI nodes.
    InsertPt = BB->begin();
    while (isa<PHINode>(InsertPt)) ++InsertPt;
  }

  // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
  // BB so that there is only one value live across basic blocks (the cast
  // operand).
  if (CastInst *CI = dyn_cast<CastInst>(Ptr))
    if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
      Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);

  // Add the offset, cast it to the right type.
  Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
  Ptr = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
  return V = Ptr;
}
/// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
/// selection, we want to be a bit careful about some things.  In particular, if
/// we have a GEP instruction that is used in a different block than it is
/// defined, the addressing expression of the GEP cannot be folded into loads or
/// stores that use it.  In this case, decompose the GEP and move constant
/// indices into blocks that use it.
static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
                                  const TargetData &TD) {
  // If this GEP is only used inside the block it is defined in, there is no
  // need to rewrite it.
  bool isUsedOutsideDefBB = false;
  BasicBlock *DefBB = GEPI->getParent();
  for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
       UI != E; ++UI) {
    if (cast<Instruction>(*UI)->getParent() != DefBB) {
      isUsedOutsideDefBB = true;
      break;
    }
  }
  if (!isUsedOutsideDefBB) return;

  // If this GEP has no non-zero constant indices, there is nothing we can do,
  // ignore it.
  bool hasConstantIndex = false;
  for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
       E = GEPI->op_end(); OI != E; ++OI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI))
      if (CI->getRawValue()) {
        hasConstantIndex = true;
        break;
      }
  }
  // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
  if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0))) return;

  // Otherwise, decompose the GEP instruction into multiplies and adds.  Sum the
  // constant offset (which we now know is non-zero) and deal with it later.
  uint64_t ConstantOffset = 0;
  const Type *UIntPtrTy = TD.getIntPtrType();
  Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
  const Type *Ty = GEPI->getOperand(0)->getType();

  for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
       E = GEPI->op_end(); OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantUInt>(Idx)->getValue();
      if (Field)
        ConstantOffset += TD.getStructLayout(StTy)->MemberOffsets[Field];
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // Handle constant subscripts.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getRawValue() == 0) continue;

        if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
          ConstantOffset += (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
        else
          ConstantOffset += TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
        continue;
      }

      // Ptr = Ptr + Idx * ElementSize;

      // Cast Idx to UIntPtrTy if needed.
      Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);

      uint64_t ElementSize = TD.getTypeSize(Ty);
      // Mask off bits that should not be set.
      ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
      Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize);

      // Multiply by the element size and add to the base.
      Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
      Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
    }
  }

  // Make sure that the offset fits in uintptr_t.
  ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
  Constant *PtrOffset = ConstantUInt::get(UIntPtrTy, ConstantOffset);

  // Okay, we have now emitted all of the variable index parts to the BB that
  // the GEP is defined in.  Loop over all of the using instructions, inserting
  // an "add Ptr, ConstantOffset" into each block that uses it and update the
  // instruction to use the newly computed value, making GEPI dead.  When the
  // user is a load or store instruction address, we emit the add into the user
  // block, otherwise we use a canonical version right next to the gep (these
  // won't be foldable as addresses, so we might as well share the computation).
  std::map<BasicBlock*,Value*> InsertedExprs;
  while (!GEPI->use_empty()) {
    Instruction *User = cast<Instruction>(GEPI->use_back());

    // If this use is not foldable into the addressing mode, use a version
    // emitted in the GEP block.
    Value *NewVal;
    if (!isa<LoadInst>(User) &&
        (!isa<StoreInst>(User) || User->getOperand(0) == GEPI)) {
      NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
                                    Ptr, PtrOffset);
    } else {
      // Otherwise, insert the code in the User's block so it can be folded into
      // any users in that block.
      NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
                                    User->getParent(), GEPI,
                                    Ptr, PtrOffset);
    }
    User->replaceUsesOfWith(GEPI, NewVal);
  }

  // Finally, the GEP is dead, remove it.
  GEPI->eraseFromParent();
}
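// For example, if "%p = getelementptr %struct* %S, int 0, uint 2, int %i" is
// defined in one block but loaded from in another, the variable part
// (%i * elementsize) is computed once next to the GEP, while the constant
// member offset is re-added right before each load:
//
//   user block:  %sum = add uint %varpart, <offset-of-field-2>
//                %p'  = cast uint %sum to <type of %p>
//
// so the add and cast can fold into the load's addressing mode during
// selection.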
bool SelectionDAGISel::runOnFunction(Function &Fn) {
  MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
  RegMap = MF.getSSARegMap();
  DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");

  // First, split all critical edges for PHI nodes with incoming values that are
  // constants; this way the load of the constant into a vreg will not be placed
  // into MBBs that are used some other way.
  //
  // In this pass we also look for GEP instructions that are used across basic
  // blocks and rewrite them to improve basic-block-at-a-time selection.
  for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
    PHINode *PN;
    BasicBlock::iterator BBI;
    for (BBI = BB->begin(); (PN = dyn_cast<PHINode>(BBI)); ++BBI)
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        if (isa<Constant>(PN->getIncomingValue(i)))
          SplitCriticalEdge(PN->getIncomingBlock(i), BB);

    for (BasicBlock::iterator E = BB->end(); BBI != E; )
      if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(BBI++))
        OptimizeGEPExpression(GEPI, TLI.getTargetData());
  }

  FunctionLoweringInfo FuncInfo(TLI, Fn, MF);

  for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
    SelectBasicBlock(I, MF, FuncInfo);

  return true;
}
SDOperand SelectionDAGISel::
CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) {
  SDOperand Op = SDL.getValue(V);
  assert((Op.getOpcode() != ISD::CopyFromReg ||
          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
         "Copy from a reg to the same reg!");

  // If this type is not legal, we must make sure to not create an invalid
  // register use.
  MVT::ValueType SrcVT = Op.getValueType();
  MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
  SelectionDAG &DAG = SDL.DAG;
  if (SrcVT == DestVT) {
    return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
  } else if (SrcVT < DestVT) {
    // The src value is promoted to the register.
    if (MVT::isFloatingPoint(SrcVT))
      Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
    else
      Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
    return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
  } else {
    // The src value is expanded into multiple registers.
    SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
                               Op, DAG.getConstant(0, MVT::i32));
    SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
                               Op, DAG.getConstant(1, MVT::i32));
    Op = DAG.getCopyToReg(SDL.getRoot(), Reg, Lo);
    return DAG.getCopyToReg(Op, Reg+1, Hi);
  }
}
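// For example, copying an i16 value into its vreg on a target that promotes
// i16 to i32 emits CopyToReg(ANY_EXTEND(i32, Op)), while an i64 value on a
// 32-bit target is split with EXTRACT_ELEMENT into two CopyToReg nodes for
// the consecutive registers Reg and Reg+1.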
void SelectionDAGISel::
LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
               std::vector<SDOperand> &UnorderedChains) {
  // If this is the entry block, emit arguments.
  Function &F = *BB->getParent();
  FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
  SDOperand OldRoot = SDL.DAG.getRoot();
  std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);

  unsigned a = 0;
  for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
       AI != E; ++AI, ++a)
    if (!AI->use_empty()) {
      SDL.setValue(AI, Args[a]);

      // If this argument is live outside of the entry block, insert a copy from
      // wherever we got it to the vreg that other BB's will reference it as.
      if (FuncInfo.ValueMap.count(AI)) {
        SDOperand Copy =
          CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
        UnorderedChains.push_back(Copy);
      }
    }

  // Next, if the function has live ins that need to be copied into vregs,
  // emit the copies now, into the top of the block.
  MachineFunction &MF = SDL.DAG.getMachineFunction();
  if (MF.livein_begin() != MF.livein_end()) {
    SSARegMap *RegMap = MF.getSSARegMap();
    const MRegisterInfo &MRI = *MF.getTarget().getRegisterInfo();
    for (MachineFunction::livein_iterator LI = MF.livein_begin(),
         E = MF.livein_end(); LI != E; ++LI)
      if (LI->second)
        MRI.copyRegToReg(*MF.begin(), MF.begin()->end(), LI->second,
                         LI->first, RegMap->getRegClass(LI->second));
  }

  // Finally, if the target has anything special to do, allow it to do so.
  EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction());
}
void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
       std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
                                         FunctionLoweringInfo &FuncInfo) {
  SelectionDAGLowering SDL(DAG, TLI, FuncInfo);

  std::vector<SDOperand> UnorderedChains;

  // Lower any arguments needed in this block if this is the entry block.
  if (LLVMBB == &LLVMBB->getParent()->front())
    LowerArguments(LLVMBB, SDL, UnorderedChains);

  BB = FuncInfo.MBBMap[LLVMBB];
  SDL.setCurrentBasicBlock(BB);

  // Lower all of the non-terminator instructions.
  for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
       I != E; ++I)
    SDL.visit(*I);

  // Ensure that all instructions which are used outside of their defining
  // blocks are available as virtual registers.
  for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
    if (!I->use_empty() && !isa<PHINode>(I)) {
      std::map<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I);
      if (VMI != FuncInfo.ValueMap.end())
        UnorderedChains.push_back(
          CopyValueToVirtualRegister(SDL, I, VMI->second));
    }

  // Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
  // ensure constants are generated when needed.  Remember the virtual registers
  // that need to be added to the Machine PHI nodes as input.  We cannot just
  // directly add them, because expansion might result in multiple MBB's for one
  // BB.  As such, the start of the BB might correspond to a different MBB than
  // the end.

  // Emit constants only once even if used by multiple PHI nodes.
  std::map<Constant*, unsigned> ConstantsOut;

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  TerminatorInst *TI = LLVMBB->getTerminator();
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    BasicBlock *SuccBB = TI->getSuccessor(succ);
    MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();
    PHINode *PN;

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::iterator I = SuccBB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        unsigned Reg;
        Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
        if (Constant *C = dyn_cast<Constant>(PHIOp)) {
          unsigned &RegOut = ConstantsOut[C];
          if (RegOut == 0) {
            RegOut = FuncInfo.CreateRegForValue(C);
            UnorderedChains.push_back(
              CopyValueToVirtualRegister(SDL, C, RegOut));
          }
          Reg = RegOut;
        } else {
          Reg = FuncInfo.ValueMap[PHIOp];
          if (Reg == 0) {
            assert(isa<AllocaInst>(PHIOp) &&
                   FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
                   "Didn't codegen value into a register!??");
            Reg = FuncInfo.CreateRegForValue(PHIOp);
            UnorderedChains.push_back(
              CopyValueToVirtualRegister(SDL, PHIOp, Reg));
          }
        }

        // Remember that this register needs to be added to the machine PHI node
        // as the input for this MBB.
        unsigned NumElements =
          TLI.getNumElements(TLI.getValueType(PN->getType()));
        for (unsigned i = 0, e = NumElements; i != e; ++i)
          PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
      }
  }
  ConstantsOut.clear();

  // Turn all of the unordered chains into one factored node.
  if (!UnorderedChains.empty()) {
    SDOperand Root = SDL.getRoot();
    if (Root.getOpcode() != ISD::EntryToken) {
      unsigned i = 0, e = UnorderedChains.size();
      for (; i != e; ++i) {
        assert(UnorderedChains[i].Val->getNumOperands() > 1);
        if (UnorderedChains[i].Val->getOperand(0) == Root)
          break;  // Don't add the root if we already indirectly depend on it.
      }

      if (i == e)
        UnorderedChains.push_back(Root);
    }
    DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, UnorderedChains));
  }

  // Lower the terminator after the copies are emitted.
  SDL.visit(*LLVMBB->getTerminator());

  // Make sure the root of the DAG is up-to-date.
  DAG.setRoot(SDL.getRoot());
}
void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
                                        FunctionLoweringInfo &FuncInfo) {
  SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());

  std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;

  // First step, lower LLVM code to some DAG.  This DAG may use operations and
  // types that are not supported by the target.
  BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);

  // Run the DAG combiner in pre-legalize mode.
  DAG.Combine(false);

  DEBUG(std::cerr << "Lowered selection DAG:\n");
  DEBUG(DAG.dump());

  // Second step, hack on the DAG until it only uses operations and types that
  // the target supports.
  DAG.Legalize();

  DEBUG(std::cerr << "Legalized selection DAG:\n");
  DEBUG(DAG.dump());

  // Run the DAG combiner in post-legalize mode.
  DAG.Combine(true);

  if (ViewISelDAGs) DAG.viewGraph();

  // Third, instruction select all of the operations to machine code, adding the
  // code to the MachineBasicBlock.
  InstructionSelectBasicBlock(DAG);

  DEBUG(std::cerr << "Selected machine code:\n");
  DEBUG(BB->dump());

  // Next, now that we know what the last MBB the LLVM BB expanded is, update
  // PHI nodes in successors.
  for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
    MachineInstr *PHI = PHINodesToUpdate[i].first;
    assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
           "This is not a machine PHI node that we are updating!");
    PHI->addRegOperand(PHINodesToUpdate[i].second);
    PHI->addMachineBasicBlockOperand(BB);
  }

  // Finally, add the CFG edges from the last selected MBB to the successor
  // MBB's.
  TerminatorInst *TI = LLVMBB->getTerminator();
  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
    MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[TI->getSuccessor(i)];
    BB->addSuccessor(Succ0MBB);
  }
}
//===----------------------------------------------------------------------===//
/// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
/// target node in the graph.
void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) {
  if (ViewSchedDAGs) DAG.viewGraph();
  ScheduleDAG *SL = NULL;

  switch (ISHeuristic) {
  default: assert(0 && "Unrecognized scheduling heuristic");
  case defaultScheduling:
    if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency)
      SL = createSimpleDAGScheduler(noScheduling, DAG, BB);
    else /* TargetLowering::SchedulingForRegPressure */
      SL = createBURRListDAGScheduler(DAG, BB);
    break;
  case noScheduling:
  case simpleScheduling:
  case simpleNoItinScheduling:
    SL = createSimpleDAGScheduler(ISHeuristic, DAG, BB);
    break;
  case listSchedulingBURR:
    SL = createBURRListDAGScheduler(DAG, BB);