//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "isel"
15 #include "llvm/Analysis/AliasAnalysis.h"
16 #include "llvm/CodeGen/SelectionDAGISel.h"
17 #include "llvm/CodeGen/ScheduleDAG.h"
18 #include "llvm/CallingConv.h"
19 #include "llvm/Constants.h"
20 #include "llvm/DerivedTypes.h"
21 #include "llvm/Function.h"
22 #include "llvm/GlobalVariable.h"
23 #include "llvm/InlineAsm.h"
24 #include "llvm/Instructions.h"
25 #include "llvm/Intrinsics.h"
26 #include "llvm/IntrinsicInst.h"
27 #include "llvm/CodeGen/MachineModuleInfo.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineFrameInfo.h"
30 #include "llvm/CodeGen/MachineJumpTableInfo.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/SchedulerRegistry.h"
33 #include "llvm/CodeGen/SelectionDAG.h"
34 #include "llvm/CodeGen/SSARegMap.h"
35 #include "llvm/Target/MRegisterInfo.h"
36 #include "llvm/Target/TargetAsmInfo.h"
37 #include "llvm/Target/TargetData.h"
38 #include "llvm/Target/TargetFrameInfo.h"
39 #include "llvm/Target/TargetInstrInfo.h"
40 #include "llvm/Target/TargetLowering.h"
41 #include "llvm/Target/TargetMachine.h"
42 #include "llvm/Target/TargetOptions.h"
43 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
44 #include "llvm/Support/MathExtras.h"
45 #include "llvm/Support/Debug.h"
46 #include "llvm/Support/Compiler.h"
using namespace llvm;

#ifndef NDEBUG
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
          cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
          cl::desc("Pop up a window to show sched dags as they are processed"));
#else
static const bool ViewISelDAGs = 0, ViewSchedDAGs = 0;
#endif
62 //===---------------------------------------------------------------------===//
64 /// RegisterScheduler class - Track the registration of instruction schedulers.
66 //===---------------------------------------------------------------------===//
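/// Every instruction scheduler registers itself in this registry via a static
/// RegisterScheduler object; the scheduler command line option below selects
/// among the registered implementations.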
67 MachinePassRegistry RegisterScheduler::Registry;
69 //===---------------------------------------------------------------------===//
71 /// ISHeuristic command line option for instruction schedulers.
73 //===---------------------------------------------------------------------===//
cl::opt<RegisterScheduler::FunctionPassCtor, false,
        RegisterPassParser<RegisterScheduler> >
ISHeuristic("pre-RA-sched",
            cl::init(&createDefaultScheduler),
            cl::desc("Instruction schedulers available:"));
81 static RegisterScheduler
82 defaultListDAGScheduler("default", " Best scheduler for the target",
83 createDefaultScheduler);
87 /// RegsForValue - This struct represents the physical registers that a
88 /// particular value is assigned and the type information about the value.
89 /// This is needed because values can be promoted into larger registers and
90 /// expanded into multiple smaller registers than the value.
91 struct VISIBILITY_HIDDEN RegsForValue {
  /// Regs - This list holds the register (for legal and promoted values)
  /// or register set (for expanded values) that the value should be assigned
  /// to.
  std::vector<unsigned> Regs;

  /// RegVT - The value type of each register.
  MVT::ValueType RegVT;
101 /// ValueVT - The value type of the LLVM value, which may be promoted from
102 /// RegVT or made from merging the two expanded parts.
103 MVT::ValueType ValueVT;
  RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {}

  RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt)
    : RegVT(regvt), ValueVT(valuevt) {
    Regs.push_back(Reg);
  }

  RegsForValue(const std::vector<unsigned> &regs,
               MVT::ValueType regvt, MVT::ValueType valuevt)
    : Regs(regs), RegVT(regvt), ValueVT(valuevt) {
  }
116 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
117 /// this value and returns the result as a ValueVT value. This uses
118 /// Chain/Flag as the input and updates them for the output Chain/Flag.
119 SDOperand getCopyFromRegs(SelectionDAG &DAG,
120 SDOperand &Chain, SDOperand &Flag) const;
122 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
123 /// specified value into the registers specified by this object. This uses
124 /// Chain/Flag as the input and updates them for the output Chain/Flag.
125 void getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
126 SDOperand &Chain, SDOperand &Flag,
127 MVT::ValueType PtrVT) const;
129 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
130 /// operand list. This adds the code marker and includes the number of
131 /// values added into it.
  void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
                            std::vector<SDOperand> &Ops) const;
};
138 //===--------------------------------------------------------------------===//
/// createDefaultScheduler - This creates an instruction scheduler appropriate
/// for the target.
ScheduleDAG* createDefaultScheduler(SelectionDAGISel *IS,
                                    SelectionDAG *DAG,
                                    MachineBasicBlock *BB) {
  TargetLowering &TLI = IS->getTargetLowering();

  if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency) {
    return createTDListDAGScheduler(IS, DAG, BB);
  } else {
    assert(TLI.getSchedulingPreference() ==
           TargetLowering::SchedulingForRegPressure && "Unknown sched type!");
    return createBURRListDAGScheduler(IS, DAG, BB);
  }
}
//===--------------------------------------------------------------------===//
/// FunctionLoweringInfo - This contains information that is global to a
/// function that is used when lowering a region of the function.
class FunctionLoweringInfo {
public:
  TargetLowering &TLI;
  Function &Fn;
  MachineFunction &MF;
  SSARegMap *RegMap;

  FunctionLoweringInfo(TargetLowering &TLI, Function &Fn, MachineFunction &MF);
168 /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
169 std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;
171 /// ValueMap - Since we emit code for the function a basic block at a time,
172 /// we must remember which virtual registers hold the values for
173 /// cross-basic-block values.
174 DenseMap<const Value*, unsigned> ValueMap;
176 /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
177 /// the entry block. This allows the allocas to be efficiently referenced
178 /// anywhere in the function.
179 std::map<const AllocaInst*, int> StaticAllocaMap;
  unsigned MakeReg(MVT::ValueType VT) {
    return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
  }
185 /// isExportedInst - Return true if the specified value is an instruction
186 /// exported from its block.
187 bool isExportedInst(const Value *V) {
188 return ValueMap.count(V);
191 unsigned CreateRegForValue(const Value *V);
193 unsigned InitializeRegForValue(const Value *V) {
194 unsigned &R = ValueMap[V];
195 assert(R == 0 && "Already initialized this value register!");
    return R = CreateRegForValue(V);
  }
};
201 /// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
202 /// PHI nodes or outside of the basic block that defines it, or used by a
203 /// switch instruction, which may expand to multiple basic blocks.
204 static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
205 if (isa<PHINode>(I)) return true;
206 BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
        // FIXME: Remove switchinst special case.
        isa<SwitchInst>(*UI))
      return true;
  return false;
}
215 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
216 /// entry block, return true. This includes arguments used by switches, since
217 /// the switch may expand into multiple basic blocks.
218 static bool isOnlyUsedInEntryBlock(Argument *A) {
219 BasicBlock *Entry = A->getParent()->begin();
220 for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false;  // Use not in entry block.
  return true;
}
226 FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
227 Function &fn, MachineFunction &mf)
228 : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {
230 // Create a vreg for each argument register that is not dead and is used
231 // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI))
      InitializeRegForValue(AI);
237 // Initialize the mapping of values to registers. This is only set up for
238 // instruction values that are used outside of the block that defines
240 Function::iterator BB = Fn.begin(), EB = Fn.end();
241 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
242 if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
243 if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
244 const Type *Ty = AI->getAllocatedType();
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
                   AI->getAlignment());
250 TySize *= CUI->getZExtValue(); // Get total allocated size.
251 if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
252 StaticAllocaMap[AI] =
253 MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
256 for (; BB != EB; ++BB)
257 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
258 if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
259 if (!isa<AllocaInst>(I) ||
260 !StaticAllocaMap.count(cast<AllocaInst>(I)))
261 InitializeRegForValue(I);
263 // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
264 // also creates the initial PHI MachineInstrs, though none of the input
265 // operands are populated.
266 for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF.getBasicBlockList().push_back(MBB);
    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin(); (PN = dyn_cast<PHINode>(I)); ++I){
275 if (PN->use_empty()) continue;
277 MVT::ValueType VT = TLI.getValueType(PN->getType());
278 unsigned NumElements;
      if (VT != MVT::Vector)
        NumElements = TLI.getNumElements(VT);
      else {
        MVT::ValueType VT1, VT2;
        NumElements =
          TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
                                     VT1, VT2);
      }
287 unsigned PHIReg = ValueMap[PN];
288 assert(PHIReg && "PHI node does not have an assigned virtual register!");
289 const TargetInstrInfo *TII = TLI.getTargetMachine().getInstrInfo();
290 for (unsigned i = 0; i != NumElements; ++i)
        BuildMI(MBB, TII->get(TargetInstrInfo::PHI), PHIReg+i);
    }
  }
}
296 /// CreateRegForValue - Allocate the appropriate number of virtual registers of
297 /// the correctly promoted or expanded types. Assign these registers
298 /// consecutive vreg numbers and return the first assigned number.
299 unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
300 MVT::ValueType VT = TLI.getValueType(V->getType());
  // The number of register multiples that we need, to, e.g., split up
303 // a <2 x int64> -> 4 x i32 registers.
304 unsigned NumVectorRegs = 1;
306 // If this is a packed type, figure out what type it will decompose into
307 // and how many of the elements it will use.
308 if (VT == MVT::Vector) {
309 const PackedType *PTy = cast<PackedType>(V->getType());
310 unsigned NumElts = PTy->getNumElements();
311 MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType());
    // Divide the input until we get to a supported size. This will always
    // end with a scalar if the target doesn't support vectors.
    while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) {
      NumElts >>= 1;
      NumVectorRegs <<= 1;
    }

    VT = getVectorType(EltTy, NumElts);
  }
325 // The common case is that we will only create one register for this
326 // value. If we have that case, create and return the virtual register.
  unsigned NV = TLI.getNumElements(VT);
  if (NV == 1) {
329 // If we are promoting this value, pick the next largest supported type.
330 MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT);
331 unsigned Reg = MakeReg(PromotedType);
332 // If this is a vector of supported or promoted types (e.g. 4 x i16),
333 // create all of the registers.
    for (unsigned i = 1; i != NumVectorRegs; ++i)
      MakeReg(PromotedType);
    return Reg;
  }
339 // If this value is represented with multiple target registers, make sure
340 // to create enough consecutive registers of the right (smaller) type.
341 VT = TLI.getTypeToExpandTo(VT);
  unsigned R = MakeReg(VT);
  for (unsigned i = 1; i != NV*NumVectorRegs; ++i)
    MakeReg(VT);
  return R;
}
348 //===----------------------------------------------------------------------===//
349 /// SelectionDAGLowering - This is the common target-independent lowering
350 /// implementation that is parameterized by a TargetLowering object.
351 /// Also, targets can overload any lowering method.
354 class SelectionDAGLowering {
355 MachineBasicBlock *CurMBB;
357 DenseMap<const Value*, SDOperand> NodeMap;
359 /// PendingLoads - Loads are not emitted to the program immediately. We bunch
360 /// them up and then emit token factor nodes when possible. This allows us to
361 /// get simple disambiguation between loads without worrying about alias
363 std::vector<SDOperand> PendingLoads;
365 /// Case - A pair of values to record the Value for a switch case, and the
366 /// case's target basic block.
367 typedef std::pair<Constant*, MachineBasicBlock*> Case;
368 typedef std::vector<Case>::iterator CaseItr;
369 typedef std::pair<CaseItr, CaseItr> CaseRange;
371 /// CaseRec - A struct with ctor used in lowering switches to a binary tree
372 /// of conditional branches.
374 CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) :
375 CaseBB(bb), LT(lt), GE(ge), Range(r) {}
377 /// CaseBB - The MBB in which to emit the compare and branch
378 MachineBasicBlock *CaseBB;
379 /// LT, GE - If nonzero, we know the current case value must be less-than or
    /// greater-than-or-equal-to these Constants.
    Constant *LT;
    Constant *GE;
    /// Range - A pair of iterators representing the range of case values to be
    /// processed at this point in the binary search tree.
    CaseRange Range;
  };

  /// The comparison function for sorting Case values.
  struct CaseCmp {
390 bool operator () (const Case& C1, const Case& C2) {
391 assert(isa<ConstantInt>(C1.first) && isa<ConstantInt>(C2.first));
392 return cast<const ConstantInt>(C1.first)->getSExtValue() <
             cast<const ConstantInt>(C2.first)->getSExtValue();
    }
  };

public:
398 // TLI - This is information that describes the available target features we
399 // need for lowering. This indicates when operations are unavailable,
  // implemented with a libcall, etc.
  TargetLowering &TLI;
  SelectionDAG &DAG;
  const TargetData *TD;
405 /// SwitchCases - Vector of CaseBlock structures used to communicate
406 /// SwitchInst code generation information.
407 std::vector<SelectionDAGISel::CaseBlock> SwitchCases;
408 SelectionDAGISel::JumpTable JT;
410 /// FuncInfo - Information about the function as a whole.
412 FunctionLoweringInfo &FuncInfo;
414 SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
415 FunctionLoweringInfo &funcinfo)
416 : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
417 JT(0,0,0,0), FuncInfo(funcinfo) {
420 /// getRoot - Return the current virtual root of the Selection DAG.
422 SDOperand getRoot() {
423 if (PendingLoads.empty())
424 return DAG.getRoot();
426 if (PendingLoads.size() == 1) {
      SDOperand Root = PendingLoads[0];
      PendingLoads.clear();
      DAG.setRoot(Root);
      return Root;
    }
433 // Otherwise, we have to make a token factor node.
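    // A TokenFactor takes all of the pending load chains and produces a single
    // chain that later operations can depend on, without imposing an ordering
    // among the loads themselves.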
434 SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
435 &PendingLoads[0], PendingLoads.size());
    PendingLoads.clear();
    DAG.setRoot(Root);
    return Root;
  }
441 SDOperand CopyValueToVirtualRegister(Value *V, unsigned Reg);
443 void visit(Instruction &I) { visit(I.getOpcode(), I); }
445 void visit(unsigned Opcode, User &I) {
446 // Note: this doesn't use InstVisitor, because it has to work with
447 // ConstantExpr's in addition to instructions.
    switch (Opcode) {
    default: assert(0 && "Unknown instruction type encountered!");
             abort();
451 // Build the switch statement using the Instruction.def file.
452 #define HANDLE_INST(NUM, OPCODE, CLASS) \
453 case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
454 #include "llvm/Instruction.def"
458 void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }
460 SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr,
461 const Value *SV, SDOperand Root,
464 SDOperand getIntPtrConstant(uint64_t Val) {
465 return DAG.getConstant(Val, TLI.getPointerTy());
468 SDOperand getValue(const Value *V);
470 void setValue(const Value *V, SDOperand NewN) {
471 SDOperand &N = NodeMap[V];
    assert(N.Val == 0 && "Already set a value for this node!");
    N = NewN;
  }
476 RegsForValue GetRegistersForValue(const std::string &ConstrCode,
478 bool OutReg, bool InReg,
479 std::set<unsigned> &OutputRegs,
480 std::set<unsigned> &InputRegs);
482 void FindMergedConditions(Value *Cond, MachineBasicBlock *TBB,
                             MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                             unsigned Opc);
485 bool isExportableFromCurrentBlock(Value *V, const BasicBlock *FromBB);
486 void ExportFromCurrentBlock(Value *V);
488 // Terminator instructions.
489 void visitRet(ReturnInst &I);
490 void visitBr(BranchInst &I);
491 void visitSwitch(SwitchInst &I);
492 void visitUnreachable(UnreachableInst &I) { /* noop */ }
494 // Helper for visitSwitch
495 void visitSwitchCase(SelectionDAGISel::CaseBlock &CB);
496 void visitJumpTable(SelectionDAGISel::JumpTable &JT);
498 // These all get lowered before this pass.
499 void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
500 void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }
502 void visitScalarBinary(User &I, unsigned OpCode);
503 void visitVectorBinary(User &I, unsigned OpCode);
504 void visitEitherBinary(User &I, unsigned ScalarOp, unsigned VectorOp);
505 void visitShift(User &I, unsigned Opcode);
506 void visitAdd(User &I) {
507 if (isa<PackedType>(I.getType()))
508 visitVectorBinary(I, ISD::VADD);
509 else if (I.getType()->isFloatingPoint())
510 visitScalarBinary(I, ISD::FADD);
    else
      visitScalarBinary(I, ISD::ADD);
  }
514 void visitSub(User &I);
515 void visitMul(User &I) {
516 if (isa<PackedType>(I.getType()))
517 visitVectorBinary(I, ISD::VMUL);
518 else if (I.getType()->isFloatingPoint())
519 visitScalarBinary(I, ISD::FMUL);
    else
      visitScalarBinary(I, ISD::MUL);
  }
523 void visitURem(User &I) { visitScalarBinary(I, ISD::UREM); }
524 void visitSRem(User &I) { visitScalarBinary(I, ISD::SREM); }
525 void visitFRem(User &I) { visitScalarBinary(I, ISD::FREM); }
526 void visitUDiv(User &I) { visitEitherBinary(I, ISD::UDIV, ISD::VUDIV); }
527 void visitSDiv(User &I) { visitEitherBinary(I, ISD::SDIV, ISD::VSDIV); }
528 void visitFDiv(User &I) { visitEitherBinary(I, ISD::FDIV, ISD::VSDIV); }
529 void visitAnd (User &I) { visitEitherBinary(I, ISD::AND, ISD::VAND ); }
530 void visitOr (User &I) { visitEitherBinary(I, ISD::OR, ISD::VOR ); }
531 void visitXor (User &I) { visitEitherBinary(I, ISD::XOR, ISD::VXOR ); }
532 void visitShl (User &I) { visitShift(I, ISD::SHL); }
533 void visitLShr(User &I) { visitShift(I, ISD::SRL); }
534 void visitAShr(User &I) { visitShift(I, ISD::SRA); }
535 void visitICmp(User &I);
536 void visitFCmp(User &I);
537 // Visit the conversion instructions
538 void visitTrunc(User &I);
539 void visitZExt(User &I);
540 void visitSExt(User &I);
541 void visitFPTrunc(User &I);
542 void visitFPExt(User &I);
543 void visitFPToUI(User &I);
544 void visitFPToSI(User &I);
545 void visitUIToFP(User &I);
546 void visitSIToFP(User &I);
547 void visitPtrToInt(User &I);
548 void visitIntToPtr(User &I);
549 void visitBitCast(User &I);
551 void visitExtractElement(User &I);
552 void visitInsertElement(User &I);
553 void visitShuffleVector(User &I);
555 void visitGetElementPtr(User &I);
556 void visitSelect(User &I);
558 void visitMalloc(MallocInst &I);
559 void visitFree(FreeInst &I);
560 void visitAlloca(AllocaInst &I);
561 void visitLoad(LoadInst &I);
562 void visitStore(StoreInst &I);
563 void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
564 void visitCall(CallInst &I);
565 void visitInlineAsm(CallInst &I);
566 const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
567 void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);
569 void visitVAStart(CallInst &I);
570 void visitVAArg(VAArgInst &I);
571 void visitVAEnd(CallInst &I);
572 void visitVACopy(CallInst &I);
574 void visitMemIntrinsic(CallInst &I, unsigned Op);
576 void visitUserOp1(Instruction &I) {
577 assert(0 && "UserOp1 should not exist at instruction selection time!");
580 void visitUserOp2(Instruction &I) {
581 assert(0 && "UserOp2 should not exist at instruction selection time!");
585 } // end namespace llvm
587 SDOperand SelectionDAGLowering::getValue(const Value *V) {
  SDOperand &N = NodeMap[V];
  if (N.Val) return N;
591 const Type *VTy = V->getType();
592 MVT::ValueType VT = TLI.getValueType(VTy);
593 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
594 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
595 visit(CE->getOpcode(), *CE);
596 SDOperand N1 = NodeMap[V];
      assert(N1.Val && "visit didn't populate the ValueMap!");
      return N1;
599 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
600 return N = DAG.getGlobalAddress(GV, VT);
601 } else if (isa<ConstantPointerNull>(C)) {
602 return N = DAG.getConstant(0, TLI.getPointerTy());
603 } else if (isa<UndefValue>(C)) {
604 if (!isa<PackedType>(VTy))
605 return N = DAG.getNode(ISD::UNDEF, VT);
607 // Create a VBUILD_VECTOR of undef nodes.
608 const PackedType *PTy = cast<PackedType>(VTy);
609 unsigned NumElements = PTy->getNumElements();
610 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
612 SmallVector<SDOperand, 8> Ops;
613 Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT));
615 // Create a VConstant node with generic Vector type.
616 Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
617 Ops.push_back(DAG.getValueType(PVT));
618 return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector,
619 &Ops[0], Ops.size());
620 } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
621 return N = DAG.getConstantFP(CFP->getValue(), VT);
622 } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
623 unsigned NumElements = PTy->getNumElements();
624 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
626 // Now that we know the number and type of the elements, push a
627 // Constant or ConstantFP node onto the ops list for each element of
628 // the packed constant.
629 SmallVector<SDOperand, 8> Ops;
630 if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
631 for (unsigned i = 0; i != NumElements; ++i)
632 Ops.push_back(getValue(CP->getOperand(i)));
      } else {
        assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
        SDOperand Op;
        if (MVT::isFloatingPoint(PVT))
          Op = DAG.getConstantFP(0, PVT);
        else
          Op = DAG.getConstant(0, PVT);
        Ops.assign(NumElements, Op);
      }
643 // Create a VBUILD_VECTOR node with generic Vector type.
644 Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
645 Ops.push_back(DAG.getValueType(PVT));
      return NodeMap[V] = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0],
                                      Ops.size());
    } else {
649 // Canonicalize all constant ints to be unsigned.
650 return N = DAG.getConstant(cast<ConstantInt>(C)->getZExtValue(),VT);
654 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
655 std::map<const AllocaInst*, int>::iterator SI =
656 FuncInfo.StaticAllocaMap.find(AI);
657 if (SI != FuncInfo.StaticAllocaMap.end())
658 return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
661 DenseMap<const Value*, unsigned>::iterator VMI =
662 FuncInfo.ValueMap.find(V);
663 assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");
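  // InReg is the first of the consecutive virtual registers that
  // CreateRegForValue allocated for this value; promoted or expanded values
  // occupy the following registers as well.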
665 unsigned InReg = VMI->second;
667 // If this type is not legal, make it so now.
668 if (VT != MVT::Vector) {
669 if (TLI.getTypeAction(VT) == TargetLowering::Expand) {
670 // Source must be expanded. This input value is actually coming from the
671 // register pair VMI->second and VMI->second+1.
672 MVT::ValueType DestVT = TLI.getTypeToExpandTo(VT);
673 unsigned NumVals = TLI.getNumElements(VT);
      N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
      if (NumVals == 1)
        N = DAG.getNode(ISD::BIT_CONVERT, VT, N);
      else {
        assert(NumVals == 2 && "1 to 4 (and more) expansion not implemented!");
        N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
                        DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
      }
    } else {
683 MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);
684 N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
685 if (TLI.getTypeAction(VT) == TargetLowering::Promote) // Promotion case
686 N = MVT::isFloatingPoint(VT)
687 ? DAG.getNode(ISD::FP_ROUND, VT, N)
688 : DAG.getNode(ISD::TRUNCATE, VT, N);
691 // Otherwise, if this is a vector, make it available as a generic vector
693 MVT::ValueType PTyElementVT, PTyLegalElementVT;
694 const PackedType *PTy = cast<PackedType>(VTy);
695 unsigned NE = TLI.getPackedTypeBreakdown(PTy, PTyElementVT,
698 // Build a VBUILD_VECTOR with the input registers.
699 SmallVector<SDOperand, 8> Ops;
700 if (PTyElementVT == PTyLegalElementVT) {
701 // If the value types are legal, just VBUILD the CopyFromReg nodes.
702 for (unsigned i = 0; i != NE; ++i)
703 Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
705 } else if (PTyElementVT < PTyLegalElementVT) {
    // If the register was promoted, use TRUNCATE or FP_ROUND as appropriate.
707 for (unsigned i = 0; i != NE; ++i) {
708 SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
710 if (MVT::isFloatingPoint(PTyElementVT))
711 Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op);
713 Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op);
717 // If the register was expanded, use BUILD_PAIR.
718 assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!");
719 for (unsigned i = 0; i != NE/2; ++i) {
720 SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
722 SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
724 Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1));
728 Ops.push_back(DAG.getConstant(NE, MVT::i32));
729 Ops.push_back(DAG.getValueType(PTyLegalElementVT));
730 N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0], Ops.size());
732 // Finally, use a VBIT_CONVERT to make this available as the appropriate
734 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
735 DAG.getConstant(PTy->getNumElements(),
737 DAG.getValueType(TLI.getValueType(PTy->getElementType())));
744 void SelectionDAGLowering::visitRet(ReturnInst &I) {
745 if (I.getNumOperands() == 0) {
746 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
749 SmallVector<SDOperand, 8> NewValues;
750 NewValues.push_back(getRoot());
751 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
752 SDOperand RetOp = getValue(I.getOperand(i));
754 // If this is an integer return value, we need to promote it ourselves to
755 // the full width of a register, since LegalizeOp will use ANY_EXTEND rather
757 // FIXME: C calling convention requires the return type to be promoted to
758 // at least 32-bit. But this is not necessary for non-C calling conventions.
759 if (MVT::isInteger(RetOp.getValueType()) &&
760 RetOp.getValueType() < MVT::i64) {
761 MVT::ValueType TmpVT;
      if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
        TmpVT = TLI.getTypeToTransformTo(MVT::i32);
      else
        TmpVT = MVT::i32;
766 const FunctionType *FTy = I.getParent()->getParent()->getFunctionType();
767 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
768 if (FTy->paramHasAttr(0, FunctionType::SExtAttribute))
769 ExtendKind = ISD::SIGN_EXTEND;
770 if (FTy->paramHasAttr(0, FunctionType::ZExtAttribute))
771 ExtendKind = ISD::ZERO_EXTEND;
772 RetOp = DAG.getNode(ExtendKind, TmpVT, RetOp);
774 NewValues.push_back(RetOp);
775 NewValues.push_back(DAG.getConstant(false, MVT::i32));
777 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other,
778 &NewValues[0], NewValues.size()));
781 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
782 /// the current basic block, add it to ValueMap now so that we'll get a
784 void SelectionDAGLowering::ExportFromCurrentBlock(Value *V) {
785 // No need to export constants.
786 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
789 if (FuncInfo.isExportedInst(V)) return;
791 unsigned Reg = FuncInfo.InitializeRegForValue(V);
792 PendingLoads.push_back(CopyValueToVirtualRegister(V, Reg));
795 bool SelectionDAGLowering::isExportableFromCurrentBlock(Value *V,
796 const BasicBlock *FromBB) {
797 // The operands of the setcc have to be in this block. We don't know
798 // how to export them from some other block.
799 if (Instruction *VI = dyn_cast<Instruction>(V)) {
800 // Can export from current BB.
801 if (VI->getParent() == FromBB)
804 // Is already exported, noop.
805 return FuncInfo.isExportedInst(V);
808 // If this is an argument, we can export it if the BB is the entry block or
809 // if it is already exported.
810 if (isa<Argument>(V)) {
811 if (FromBB == &FromBB->getParent()->getEntryBlock())
814 // Otherwise, can only export this if it is already exported.
815 return FuncInfo.isExportedInst(V);
  // Otherwise, constants can always be exported.
  return true;
}
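
/// InBlock - Return true if V is defined in basic block BB. Non-instruction
/// values (constants and arguments) are considered usable from any block.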
822 static bool InBlock(const Value *V, const BasicBlock *BB) {
823 if (const Instruction *I = dyn_cast<Instruction>(V))
824 return I->getParent() == BB;
828 /// FindMergedConditions - If Cond is an expression like
829 void SelectionDAGLowering::FindMergedConditions(Value *Cond,
830 MachineBasicBlock *TBB,
831 MachineBasicBlock *FBB,
                                                MachineBasicBlock *CurBB,
                                                unsigned Opc) {
834 // If this node is not part of the or/and tree, emit it as a branch.
835 Instruction *BOp = dyn_cast<Instruction>(Cond);
837 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
838 (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
839 BOp->getParent() != CurBB->getBasicBlock() ||
840 !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
841 !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
842 const BasicBlock *BB = CurBB->getBasicBlock();
844 // If the leaf of the tree is a comparison, merge the condition into
846 if ((isa<ICmpInst>(Cond) || isa<FCmpInst>(Cond)) &&
847 // The operands of the cmp have to be in this block. We don't know
848 // how to export them from some other block. If this is the first block
849 // of the sequence, no exporting is needed.
851 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
852 isExportableFromCurrentBlock(BOp->getOperand(1), BB)))) {
853 BOp = cast<Instruction>(Cond);
854 ISD::CondCode Condition;
855 if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
856 switch (IC->getPredicate()) {
857 default: assert(0 && "Unknown icmp predicate opcode!");
858 case ICmpInst::ICMP_EQ: Condition = ISD::SETEQ; break;
859 case ICmpInst::ICMP_NE: Condition = ISD::SETNE; break;
860 case ICmpInst::ICMP_SLE: Condition = ISD::SETLE; break;
861 case ICmpInst::ICMP_ULE: Condition = ISD::SETULE; break;
862 case ICmpInst::ICMP_SGE: Condition = ISD::SETGE; break;
863 case ICmpInst::ICMP_UGE: Condition = ISD::SETUGE; break;
864 case ICmpInst::ICMP_SLT: Condition = ISD::SETLT; break;
865 case ICmpInst::ICMP_ULT: Condition = ISD::SETULT; break;
866 case ICmpInst::ICMP_SGT: Condition = ISD::SETGT; break;
867 case ICmpInst::ICMP_UGT: Condition = ISD::SETUGT; break;
869 } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
870 ISD::CondCode FPC, FOC;
871 switch (FC->getPredicate()) {
872 default: assert(0 && "Unknown fcmp predicate opcode!");
873 case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
874 case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
875 case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
876 case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
877 case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
878 case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
879 case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break;
880 case FCmpInst::FCMP_ORD: FOC = ISD::SETEQ; FPC = ISD::SETO; break;
881 case FCmpInst::FCMP_UNO: FOC = ISD::SETNE; FPC = ISD::SETUO; break;
882 case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
883 case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
884 case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
885 case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break;
886 case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break;
887 case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
888 case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break;
        if (FiniteOnlyFPMath())
          Condition = FOC;
        else
          Condition = FPC;
      } else {
        Condition = ISD::SETEQ; // silence warning.
        assert(0 && "Unknown compare instruction");
      }
899 SelectionDAGISel::CaseBlock CB(Condition, BOp->getOperand(0),
900 BOp->getOperand(1), TBB, FBB, CurBB);
901 SwitchCases.push_back(CB);
905 // Create a CaseBlock record representing this branch.
    SelectionDAGISel::CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(),
                                   TBB, FBB, CurBB);
    SwitchCases.push_back(CB);
    return;
  }
913 // Create TmpBB after CurBB.
914 MachineFunction::iterator BBI = CurBB;
915 MachineBasicBlock *TmpBB = new MachineBasicBlock(CurBB->getBasicBlock());
916 CurBB->getParent()->getBasicBlockList().insert(++BBI, TmpBB);
918 if (Opc == Instruction::Or) {
927 // Emit the LHS condition.
928 FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);
930 // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
941 // This requires creation of TmpBB after CurBB.
943 // Emit the LHS condition.
944 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);
946 // Emit the RHS condition into TmpBB.
947 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
951 /// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
static bool
ShouldEmitAsBranches(const std::vector<SelectionDAGISel::CaseBlock> &Cases) {
956 if (Cases.size() != 2) return true;
958 // If this is two comparisons of the same values or'd or and'd together, they
959 // will get folded into a single comparison, so don't emit two blocks.
960 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
961 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
962 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
963 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
970 void SelectionDAGLowering::visitBr(BranchInst &I) {
971 // Update machine-CFG edges.
972 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
974 // Figure out which block is immediately after the current one.
975 MachineBasicBlock *NextBlock = 0;
976 MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;
980 if (I.isUnconditional()) {
981 // If this is not a fall-through branch, emit the branch.
982 if (Succ0MBB != NextBlock)
983 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
984 DAG.getBasicBlock(Succ0MBB)));
986 // Update machine-CFG edges.
987 CurMBB->addSuccessor(Succ0MBB);
992 // If this condition is one of the special cases we handle, do special stuff
994 Value *CondVal = I.getCondition();
995 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
997 // If this is a series of conditions that are or'd or and'd together, emit
998 // this as a sequence of branches instead of setcc's with and/or operations.
999 // For example, instead of something like:
1012 if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1013 if (BOp->hasOneUse() &&
1014 (BOp->getOpcode() == Instruction::And ||
1015 BOp->getOpcode() == Instruction::Or)) {
1016 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
1017 // If the compares in later blocks need to use values not currently
1018 // exported from this block, export them now. This block should always
1019 // be the first entry.
1020 assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");
1022 // Allow some cases to be rejected.
1023 if (ShouldEmitAsBranches(SwitchCases)) {
1024 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1025 ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1026 ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1029 // Emit the branch for this block.
1030 visitSwitchCase(SwitchCases[0]);
1031 SwitchCases.erase(SwitchCases.begin());
1035 // Okay, we decided not to do this, remove any inserted MBB's and clear
1037 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1038 CurMBB->getParent()->getBasicBlockList().erase(SwitchCases[i].ThisBB);
1040 SwitchCases.clear();
1044 // Create a CaseBlock record representing this branch.
1045 SelectionDAGISel::CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(),
1046 Succ0MBB, Succ1MBB, CurMBB);
1047 // Use visitSwitchCase to actually insert the fast branch sequence for this
1049 visitSwitchCase(CB);
1052 /// visitSwitchCase - Emits the necessary code to represent a single node in
1053 /// the binary search tree resulting from lowering a switch instruction.
1054 void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) {
  SDOperand Cond;
  SDOperand CondLHS = getValue(CB.CmpLHS);

  // Build the setcc now, fold "(X == true)" to X and "(X == false)" to !X to
  // handle common cases produced by branch lowering.
  if (CB.CmpRHS == ConstantInt::getTrue() && CB.CC == ISD::SETEQ)
    Cond = CondLHS;
  else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) {
    SDOperand True = DAG.getConstant(1, CondLHS.getValueType());
    Cond = DAG.getNode(ISD::XOR, CondLHS.getValueType(), CondLHS, True);
  } else
    Cond = DAG.getSetCC(MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1068 // Set NextBlock to be the MBB immediately after the current one, if any.
1069 // This is used to avoid emitting unnecessary branches to the next block.
1070 MachineBasicBlock *NextBlock = 0;
1071 MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;
1075 // If the lhs block is the next block, invert the condition so that we can
1076 // fall through to the lhs instead of the rhs block.
1077 if (CB.TrueBB == NextBlock) {
1078 std::swap(CB.TrueBB, CB.FalseBB);
1079 SDOperand True = DAG.getConstant(1, Cond.getValueType());
1080 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
1082 SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
1083 DAG.getBasicBlock(CB.TrueBB));
1084 if (CB.FalseBB == NextBlock)
1085 DAG.setRoot(BrCond);
1087 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
1088 DAG.getBasicBlock(CB.FalseBB)));
1089 // Update successor info
1090 CurMBB->addSuccessor(CB.TrueBB);
1091 CurMBB->addSuccessor(CB.FalseBB);
1094 void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) {
1095 // Emit the code for the jump table
1096 MVT::ValueType PTy = TLI.getPointerTy();
1097 SDOperand Index = DAG.getCopyFromReg(getRoot(), JT.Reg, PTy);
1098 SDOperand Table = DAG.getJumpTable(JT.JTI, PTy);
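  // BR_JT takes the chain, the jump table to branch through, and the index
  // that selects the destination within it.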
  DAG.setRoot(DAG.getNode(ISD::BR_JT, MVT::Other, Index.getValue(1),
                          Table, Index));
}
1104 void SelectionDAGLowering::visitSwitch(SwitchInst &I) {
1105 // Figure out which block is immediately after the current one.
1106 MachineBasicBlock *NextBlock = 0;
1107 MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;
1112 MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()];
1114 // If there is only the default destination, branch to it if it is not the
1115 // next basic block. Otherwise, just fall through.
1116 if (I.getNumOperands() == 2) {
1117 // Update machine-CFG edges.
1119 // If this is not a fall-through branch, emit the branch.
1120 if (Default != NextBlock)
1121 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
1122 DAG.getBasicBlock(Default)));
1124 CurMBB->addSuccessor(Default);
1128 // If there are any non-default case statements, create a vector of Cases
1129 // representing each one, and sort the vector so that we can efficiently
1130 // create a binary search tree from them.
1131 std::vector<Case> Cases;
1133 for (unsigned i = 1; i < I.getNumSuccessors(); ++i) {
1134 MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)];
1135 Cases.push_back(Case(I.getSuccessorValue(i), SMBB));
1138 std::sort(Cases.begin(), Cases.end(), CaseCmp());
1140 // Get the Value to be switched on and default basic blocks, which will be
1141 // inserted into CaseBlock records, representing basic blocks in the binary
1143 Value *SV = I.getOperand(0);
1145 // Get the MachineFunction which holds the current MBB. This is used during
1146 // emission of jump tables, and when inserting any additional MBBs necessary
1147 // to represent the switch.
1148 MachineFunction *CurMF = CurMBB->getParent();
1149 const BasicBlock *LLVMBB = CurMBB->getBasicBlock();
1151 // If the switch has few cases (two or less) emit a series of specific
1153 if (Cases.size() < 3) {
1154 // TODO: If any two of the cases has the same destination, and if one value
1155 // is the same as the other, but has one bit unset that the other has set,
1156 // use bit manipulation to do two compares at once. For example:
1157 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
1159 // Rearrange the case blocks so that the last one falls through if possible.
1160 if (NextBlock && Default != NextBlock && Cases.back().second != NextBlock) {
1161 // The last case block won't fall through into 'NextBlock' if we emit the
1162 // branches in this order. See if rearranging a case value would help.
1163 for (unsigned i = 0, e = Cases.size()-1; i != e; ++i) {
1164 if (Cases[i].second == NextBlock) {
1165 std::swap(Cases[i], Cases.back());
1171 // Create a CaseBlock record representing a conditional branch to
1172 // the Case's target mbb if the value being switched on SV is equal
1174 MachineBasicBlock *CurBlock = CurMBB;
1175 for (unsigned i = 0, e = Cases.size(); i != e; ++i) {
      MachineBasicBlock *FallThrough;
      if (i != e-1) {
        FallThrough = new MachineBasicBlock(CurMBB->getBasicBlock());
        CurMF->getBasicBlockList().insert(BBI, FallThrough);
      } else {
        // If the last case doesn't match, go to the default block.
        FallThrough = Default;
      }
1185 SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, Cases[i].first,
1186 Cases[i].second, FallThrough, CurBlock);
1188 // If emitting the first comparison, just call visitSwitchCase to emit the
1189 // code into the current block. Otherwise, push the CaseBlock onto the
1190 // vector to be later processed by SDISel, and insert the node's MBB
1191 // before the next MBB.
1192 if (CurBlock == CurMBB)
        visitSwitchCase(CB);
      else
        SwitchCases.push_back(CB);
1197 CurBlock = FallThrough;
1202 // If the switch has more than 5 blocks, and at least 31.25% dense, and the
1203 // target supports indirect branches, then emit a jump table rather than
1204 // lowering the switch to a binary tree of conditional branches.
1205 if ((TLI.isOperationLegal(ISD::BR_JT, MVT::Other) ||
       TLI.isOperationLegal(ISD::BRIND, MVT::Other)) &&
      Cases.size() > 5) {
1208 uint64_t First =cast<ConstantInt>(Cases.front().first)->getZExtValue();
1209 uint64_t Last = cast<ConstantInt>(Cases.back().first)->getZExtValue();
1210 double Density = (double)Cases.size() / (double)((Last - First) + 1ULL);
1212 if (Density >= 0.3125) {
1213 // Create a new basic block to hold the code for loading the address
1214 // of the jump table, and jumping to it. Update successor information;
1215 // we will either branch to the default case for the switch, or the jump
1217 MachineBasicBlock *JumpTableBB = new MachineBasicBlock(LLVMBB);
1218 CurMF->getBasicBlockList().insert(BBI, JumpTableBB);
1219 CurMBB->addSuccessor(Default);
1220 CurMBB->addSuccessor(JumpTableBB);
1222 // Subtract the lowest switch case value from the value being switched on
1223 // and conditional branch to default mbb if the result is greater than the
1224 // difference between smallest and largest cases.
1225 SDOperand SwitchOp = getValue(SV);
1226 MVT::ValueType VT = SwitchOp.getValueType();
1227 SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp,
1228 DAG.getConstant(First, VT));
1230 // The SDNode we just created, which holds the value being switched on
      // minus the smallest case value, needs to be copied to a virtual
1232 // register so it can be used as an index into the jump table in a
1233 // subsequent basic block. This value may be smaller or larger than the
1234 // target's pointer type, and therefore require extension or truncating.
1235 if (VT > TLI.getPointerTy())
1236 SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB);
1238 SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB);
1240 unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
1241 SDOperand CopyTo = DAG.getCopyToReg(getRoot(), JumpTableReg, SwitchOp);
1243 // Emit the range check for the jump table, and branch to the default
1244 // block for the switch statement if the value being switched on exceeds
1245 // the largest case in the switch.
1246 SDOperand CMP = DAG.getSetCC(TLI.getSetCCResultTy(), SUB,
1247 DAG.getConstant(Last-First,VT), ISD::SETUGT);
1248 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP,
1249 DAG.getBasicBlock(Default)));
1251 // Build a vector of destination BBs, corresponding to each target
1252 // of the jump table. If the value of the jump table slot corresponds to
1253 // a case statement, push the case's BB onto the vector, otherwise, push
1255 std::vector<MachineBasicBlock*> DestBBs;
1256 uint64_t TEI = First;
1257 for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++TEI)
        if (cast<ConstantInt>(ii->first)->getZExtValue() == TEI) {
          DestBBs.push_back(ii->second);
          ++ii;
        } else {
          DestBBs.push_back(Default);
        }
1265 // Update successor info. Add one edge to each unique successor.
1266 // Vector bool would be better, but vector<bool> is really slow.
1267 std::vector<unsigned char> SuccsHandled;
1268 SuccsHandled.resize(CurMBB->getParent()->getNumBlockIDs());
1270 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1271 E = DestBBs.end(); I != E; ++I) {
1272 if (!SuccsHandled[(*I)->getNumber()]) {
1273 SuccsHandled[(*I)->getNumber()] = true;
1274 JumpTableBB->addSuccessor(*I);
1278 // Create a jump table index for this jump table, or return an existing
1280 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
1282 // Set the jump table information so that we can codegen it as a second
1283 // MachineBasicBlock
      JT.Reg = JumpTableReg;
      JT.JTI = JTI;
      JT.MBB = JumpTableBB;
1287 JT.Default = Default;
1292 // Push the initial CaseRec onto the worklist
1293 std::vector<CaseRec> CaseVec;
1294 CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
1296 while (!CaseVec.empty()) {
1297 // Grab a record representing a case range to process off the worklist
    CaseRec CR = CaseVec.back();
    CaseVec.pop_back();
1301 // Size is the number of Cases represented by this range. If Size is 1,
1302 // then we are processing a leaf of the binary search tree. Otherwise,
1303 // we need to pick a pivot, and push left and right ranges onto the
    unsigned Size = CR.Range.second - CR.Range.first;

    if (Size == 1) {
1308 // Create a CaseBlock record representing a conditional branch to
1309 // the Case's target mbb if the value being switched on SV is equal
1310 // to C. Otherwise, branch to default.
1311 Constant *C = CR.Range.first->first;
1312 MachineBasicBlock *Target = CR.Range.first->second;
      SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default,
                                     CR.CaseBB);
1316 // If the MBB representing the leaf node is the current MBB, then just
1317 // call visitSwitchCase to emit the code into the current block.
1318 // Otherwise, push the CaseBlock onto the vector to be later processed
1319 // by SDISel, and insert the node's MBB before the next MBB.
1320 if (CR.CaseBB == CurMBB)
        visitSwitchCase(CB);
      else
        SwitchCases.push_back(CB);
    } else {
      // split case range at pivot
1326 CaseItr Pivot = CR.Range.first + (Size / 2);
1327 CaseRange LHSR(CR.Range.first, Pivot);
1328 CaseRange RHSR(Pivot, CR.Range.second);
1329 Constant *C = Pivot->first;
1330 MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1332 // We know that we branch to the LHS if the Value being switched on is
1333 // less than the Pivot value, C. We use this to optimize our binary
1334 // tree a bit, by recognizing that if SV is greater than or equal to the
1335 // LHS's Case Value, and that Case Value is exactly one less than the
1336 // Pivot's Value, then we can branch directly to the LHS's Target,
1337 // rather than creating a leaf node for it.
1338 if ((LHSR.second - LHSR.first) == 1 &&
1339 LHSR.first->first == CR.GE &&
1340 cast<ConstantInt>(C)->getZExtValue() ==
1341 (cast<ConstantInt>(CR.GE)->getZExtValue() + 1ULL)) {
1342 TrueBB = LHSR.first->second;
1344 TrueBB = new MachineBasicBlock(LLVMBB);
1345 CurMF->getBasicBlockList().insert(BBI, TrueBB);
1346 CaseVec.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1349 // Similar to the optimization above, if the Value being switched on is
1350 // known to be less than the Constant CR.LT, and the current Case Value
1351 // is CR.LT - 1, then we can branch directly to the target block for
1352 // the current Case Value, rather than emitting a RHS leaf node for it.
1353 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1354 cast<ConstantInt>(RHSR.first->first)->getZExtValue() ==
1355 (cast<ConstantInt>(CR.LT)->getZExtValue() - 1ULL)) {
1356 FalseBB = RHSR.first->second;
1358 FalseBB = new MachineBasicBlock(LLVMBB);
1359 CurMF->getBasicBlockList().insert(BBI, FalseBB);
1360 CaseVec.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
1363 // Create a CaseBlock record representing a conditional branch to
1364 // the LHS node if the value being switched on SV is less than C.
      // Otherwise, branch to RHS.
1366 ISD::CondCode CC = ISD::SETLT;
1367 SelectionDAGISel::CaseBlock CB(CC, SV, C, TrueBB, FalseBB, CR.CaseBB);
1369 if (CR.CaseBB == CurMBB)
1370 visitSwitchCase(CB);
1372 SwitchCases.push_back(CB);
1377 void SelectionDAGLowering::visitSub(User &I) {
1378 // -0.0 - X --> fneg
1379 const Type *Ty = I.getType();
1380 if (isa<PackedType>(Ty)) {
1381 visitVectorBinary(I, ISD::VSUB);
1382 } else if (Ty->isFloatingPoint()) {
1383 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
1384 if (CFP->isExactlyValue(-0.0)) {
1385 SDOperand Op2 = getValue(I.getOperand(1));
        setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
        return;
      }
    visitScalarBinary(I, ISD::FSUB);
  } else
    visitScalarBinary(I, ISD::SUB);
}
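
/// visitScalarBinary - Emit the target-independent SelectionDAG node for a
/// binary operator whose operands are scalar values.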
1394 void SelectionDAGLowering::visitScalarBinary(User &I, unsigned OpCode) {
1395 SDOperand Op1 = getValue(I.getOperand(0));
1396 SDOperand Op2 = getValue(I.getOperand(1));
  setValue(&I, DAG.getNode(OpCode, Op1.getValueType(), Op1, Op2));
}
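
/// visitVectorBinary - Emit a generic vector node for a binary operator on
/// packed operands. Generic vector nodes carry the element count and element
/// type as their trailing operands.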
void
SelectionDAGLowering::visitVectorBinary(User &I, unsigned OpCode) {
1403 assert(isa<PackedType>(I.getType()));
1404 const PackedType *Ty = cast<PackedType>(I.getType());
1405 SDOperand Typ = DAG.getValueType(TLI.getValueType(Ty->getElementType()));
1407 setValue(&I, DAG.getNode(OpCode, MVT::Vector,
1408 getValue(I.getOperand(0)),
1409 getValue(I.getOperand(1)),
                           DAG.getConstant(Ty->getNumElements(), MVT::i32),
                           Typ));
}
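
/// visitEitherBinary - Dispatch a binary operator to its vector or scalar
/// lowering based on the type of the instruction.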
1414 void SelectionDAGLowering::visitEitherBinary(User &I, unsigned ScalarOp,
1415 unsigned VectorOp) {
1416 if (isa<PackedType>(I.getType()))
1417 visitVectorBinary(I, VectorOp);
1419 visitScalarBinary(I, ScalarOp);
1422 void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
1423 SDOperand Op1 = getValue(I.getOperand(0));
1424 SDOperand Op2 = getValue(I.getOperand(1));
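  // The shift amount must be presented in the target's shift-amount type;
  // truncate or any-extend the second operand to match it before building
  // the node.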
1426 if (TLI.getShiftAmountTy() < Op2.getValueType())
1427 Op2 = DAG.getNode(ISD::TRUNCATE, TLI.getShiftAmountTy(), Op2);
1428 else if (TLI.getShiftAmountTy() > Op2.getValueType())
1429 Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);
1431 setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
1434 void SelectionDAGLowering::visitICmp(User &I) {
1435 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
1436 if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
1437 predicate = IC->getPredicate();
1438 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
1439 predicate = ICmpInst::Predicate(IC->getPredicate());
1440 SDOperand Op1 = getValue(I.getOperand(0));
1441 SDOperand Op2 = getValue(I.getOperand(1));
1442 ISD::CondCode Opcode;
1443 switch (predicate) {
1444 case ICmpInst::ICMP_EQ : Opcode = ISD::SETEQ; break;
1445 case ICmpInst::ICMP_NE : Opcode = ISD::SETNE; break;
1446 case ICmpInst::ICMP_UGT : Opcode = ISD::SETUGT; break;
1447 case ICmpInst::ICMP_UGE : Opcode = ISD::SETUGE; break;
1448 case ICmpInst::ICMP_ULT : Opcode = ISD::SETULT; break;
1449 case ICmpInst::ICMP_ULE : Opcode = ISD::SETULE; break;
1450 case ICmpInst::ICMP_SGT : Opcode = ISD::SETGT; break;
1451 case ICmpInst::ICMP_SGE : Opcode = ISD::SETGE; break;
1452 case ICmpInst::ICMP_SLT : Opcode = ISD::SETLT; break;
1453 case ICmpInst::ICMP_SLE : Opcode = ISD::SETLE; break;
1455 assert(!"Invalid ICmp predicate value");
1456 Opcode = ISD::SETEQ;
1459 setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
1462 void SelectionDAGLowering::visitFCmp(User &I) {
1463 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
1464 if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
1465 predicate = FC->getPredicate();
1466 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
1467 predicate = FCmpInst::Predicate(FC->getPredicate());
1468 SDOperand Op1 = getValue(I.getOperand(0));
1469 SDOperand Op2 = getValue(I.getOperand(1));
1470 ISD::CondCode Condition, FOC, FPC;
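  // FOC is the condition code to use when NaNs can be ignored (finite-only FP
  // math); FPC is the full predicate that also specifies ordered/unordered
  // behavior.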
1471 switch (predicate) {
1472 case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
1473 case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
1474 case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
1475 case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
1476 case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
1477 case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
1478 case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break;
1479 case FCmpInst::FCMP_ORD: FOC = ISD::SETEQ; FPC = ISD::SETO; break;
1480 case FCmpInst::FCMP_UNO: FOC = ISD::SETNE; FPC = ISD::SETUO; break;
1481 case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
1482 case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
1483 case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
1484 case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break;
1485 case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break;
1486 case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
1487 case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break;
1489 assert(!"Invalid FCmp predicate value");
1490 FOC = FPC = ISD::SETFALSE;
  if (FiniteOnlyFPMath())
    Condition = FOC;
  else
    Condition = FPC;
  setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Condition));
}
1500 void SelectionDAGLowering::visitSelect(User &I) {
1501 SDOperand Cond = getValue(I.getOperand(0));
1502 SDOperand TrueVal = getValue(I.getOperand(1));
1503 SDOperand FalseVal = getValue(I.getOperand(2));
1504 if (!isa<PackedType>(I.getType())) {
1505 setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
                             TrueVal, FalseVal));
  } else {
    setValue(&I, DAG.getNode(ISD::VSELECT, MVT::Vector, Cond, TrueVal, FalseVal,
1509 *(TrueVal.Val->op_end()-2),
1510 *(TrueVal.Val->op_end()-1)));
1515 void SelectionDAGLowering::visitTrunc(User &I) {
1516 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
1517 SDOperand N = getValue(I.getOperand(0));
1518 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1519 setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
1522 void SelectionDAGLowering::visitZExt(User &I) {
1523 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
1524 // ZExt also can't be a cast to bool for same reason. So, nothing much to do
1525 SDOperand N = getValue(I.getOperand(0));
1526 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1527 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
1530 void SelectionDAGLowering::visitSExt(User &I) {
1531 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
1532 // SExt also can't be a cast to bool for same reason. So, nothing much to do
1533 SDOperand N = getValue(I.getOperand(0));
1534 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1535 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
1538 void SelectionDAGLowering::visitFPTrunc(User &I) {
1539 // FPTrunc is never a no-op cast, no need to check
1540 SDOperand N = getValue(I.getOperand(0));
1541 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1542 setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N));
1545 void SelectionDAGLowering::visitFPExt(User &I){
  // FPExt is never a no-op cast, no need to check
1547 SDOperand N = getValue(I.getOperand(0));
1548 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1549 setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
1552 void SelectionDAGLowering::visitFPToUI(User &I) {
1553 // FPToUI is never a no-op cast, no need to check
1554 SDOperand N = getValue(I.getOperand(0));
1555 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1556 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
1559 void SelectionDAGLowering::visitFPToSI(User &I) {
1560 // FPToSI is never a no-op cast, no need to check
1561 SDOperand N = getValue(I.getOperand(0));
1562 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1563 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
1566 void SelectionDAGLowering::visitUIToFP(User &I) {
1567 // UIToFP is never a no-op cast, no need to check
1568 SDOperand N = getValue(I.getOperand(0));
1569 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1570 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
1573 void SelectionDAGLowering::visitSIToFP(User &I){
1574 // SIToFP is never a no-op cast, no need to check
1575 SDOperand N = getValue(I.getOperand(0));
1576 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1577 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
1580 void SelectionDAGLowering::visitPtrToInt(User &I) {
1581 // What to do depends on the size of the integer and the size of the pointer.
1582 // We can either truncate, zero extend, or no-op, accordingly.
1583 SDOperand N = getValue(I.getOperand(0));
1584 MVT::ValueType SrcVT = N.getValueType();
1585 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1587 if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(SrcVT))
1588 Result = DAG.getNode(ISD::TRUNCATE, DestVT, N);
1590 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
1591 Result = DAG.getNode(ISD::ZERO_EXTEND, DestVT, N);
1592 setValue(&I, Result);
1595 void SelectionDAGLowering::visitIntToPtr(User &I) {
1596 // What to do depends on the size of the integer and the size of the pointer.
1597 // We can either truncate, zero extend, or no-op, accordingly.
1598 SDOperand N = getValue(I.getOperand(0));
1599 MVT::ValueType SrcVT = N.getValueType();
1600 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1601 if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(SrcVT))
1602 setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
1604 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
1605 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
1608 void SelectionDAGLowering::visitBitCast(User &I) {
1609 SDOperand N = getValue(I.getOperand(0));
1610 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1611 if (DestVT == MVT::Vector) {
1612 // This is a cast to a vector from something else.
1613 // Get information about the output vector.
1614 const PackedType *DestTy = cast<PackedType>(I.getType());
1615 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
1616 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N,
1617 DAG.getConstant(DestTy->getNumElements(),MVT::i32),
1618 DAG.getValueType(EltVT)));
1621 MVT::ValueType SrcVT = N.getValueType();
1622 if (SrcVT == MVT::Vector) {
1623 // This is a cast from a vector to something else.
1624 // Get information about the input vector.
1625 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N));
1629 // BitCast assures us that source and destination are the same size so this
1630 // is either a BIT_CONVERT or a no-op.
1631 if (DestVT != N.getValueType())
1632 setValue(&I, DAG.getNode(ISD::BIT_CONVERT, DestVT, N)); // convert types
1634 setValue(&I, N); // noop cast.
1637 void SelectionDAGLowering::visitInsertElement(User &I) {
1638 SDOperand InVec = getValue(I.getOperand(0));
1639 SDOperand InVal = getValue(I.getOperand(1));
1640 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
1641 getValue(I.getOperand(2)));
1643 SDOperand Num = *(InVec.Val->op_end()-2);
1644 SDOperand Typ = *(InVec.Val->op_end()-1);
1645 setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector,
1646 InVec, InVal, InIdx, Num, Typ));
1649 void SelectionDAGLowering::visitExtractElement(User &I) {
1650 SDOperand InVec = getValue(I.getOperand(0));
1651 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
1652 getValue(I.getOperand(1)));
1653 SDOperand Typ = *(InVec.Val->op_end()-1);
1654 setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT,
1655 TLI.getValueType(I.getType()), InVec, InIdx));
1658 void SelectionDAGLowering::visitShuffleVector(User &I) {
1659 SDOperand V1 = getValue(I.getOperand(0));
1660 SDOperand V2 = getValue(I.getOperand(1));
1661 SDOperand Mask = getValue(I.getOperand(2));
1663 SDOperand Num = *(V1.Val->op_end()-2);
1664 SDOperand Typ = *(V2.Val->op_end()-1);
1665 setValue(&I, DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector,
1666 V1, V2, Mask, Num, Typ));
1670 void SelectionDAGLowering::visitGetElementPtr(User &I) {
1671 SDOperand N = getValue(I.getOperand(0));
1672 const Type *Ty = I.getOperand(0)->getType();
1674 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
1677 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
1678 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
1681 uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
1682 N = DAG.getNode(ISD::ADD, N.getValueType(), N,
1683 getIntPtrConstant(Offset));
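// For example (assuming natural struct layout), indexing field 1 of a
// { i32, double } struct adds the StructLayout offset of 8 bytes, since the
// double field is padded out to its 8-byte alignment.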
1685 Ty = StTy->getElementType(Field);
1687 Ty = cast<SequentialType>(Ty)->getElementType();
1689 // If this is a constant subscript, handle it quickly.
1690 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
1691 if (CI->getZExtValue() == 0) continue;
1693 TD->getTypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
1694 N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
1698 // N = N + Idx * ElementSize;
1699 uint64_t ElementSize = TD->getTypeSize(Ty);
1700 SDOperand IdxN = getValue(Idx);
1702 // If the index is smaller or larger than intptr_t, truncate or extend it to match.
1704 if (IdxN.getValueType() < N.getValueType()) {
1705 IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
1706 } else if (IdxN.getValueType() > N.getValueType())
1707 IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);
1709 // If this is a multiply by a power of two, turn it into a shl
1710 // immediately. This is a very common case.
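// For example, a GEP into an array of i32 has ElementSize == 4, so the index
// is shifted left by Log2_64(4) == 2 instead of being multiplied by 4.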
1711 if (isPowerOf2_64(ElementSize)) {
1712 unsigned Amt = Log2_64(ElementSize);
1713 IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
1714 DAG.getConstant(Amt, TLI.getShiftAmountTy()));
1715 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
1719 SDOperand Scale = getIntPtrConstant(ElementSize);
1720 IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
1721 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
1727 void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
1728 // If this is a fixed sized alloca in the entry block of the function,
1729 // allocate it statically on the stack.
1730 if (FuncInfo.StaticAllocaMap.count(&I))
1731 return; // getValue will auto-populate this.
1733 const Type *Ty = I.getAllocatedType();
1734 uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
1736 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
1739 SDOperand AllocSize = getValue(I.getArraySize());
1740 MVT::ValueType IntPtr = TLI.getPointerTy();
1741 if (IntPtr < AllocSize.getValueType())
1742 AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
1743 else if (IntPtr > AllocSize.getValueType())
1744 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);
1746 AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
1747 getIntPtrConstant(TySize));
1749 // Handle alignment. If the requested alignment is less than or equal to the
1750 // stack alignment, ignore it and round the size of the allocation up to the
1751 // stack alignment size. If the size is greater than the stack alignment, we
1752 // note this in the DYNAMIC_STACKALLOC node.
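// Worked example, assuming a 16-byte stack alignment: a request for 20 bytes
// becomes (20 + 15) & ~15 == 32, i.e. the size is rounded up to the next
// multiple of the stack alignment.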
1753 unsigned StackAlign =
1754 TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
1755 if (Align <= StackAlign) {
1757 // Add SA-1 to the size.
1758 AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
1759 getIntPtrConstant(StackAlign-1));
1760 // Mask out the low bits for alignment purposes.
1761 AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
1762 getIntPtrConstant(~(uint64_t)(StackAlign-1)));
1765 SDOperand Ops[] = { getRoot(), AllocSize, getIntPtrConstant(Align) };
1766 const MVT::ValueType *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(),
1768 SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, 2, Ops, 3);
1770 DAG.setRoot(DSA.getValue(1));
1772 // Inform the Frame Information that we have just allocated a variable-sized
1774 CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
1777 void SelectionDAGLowering::visitLoad(LoadInst &I) {
1778 SDOperand Ptr = getValue(I.getOperand(0));
1784 // Do not serialize non-volatile loads against each other.
1785 Root = DAG.getRoot();
1788 setValue(&I, getLoadFrom(I.getType(), Ptr, I.getOperand(0),
1789 Root, I.isVolatile()));
1792 SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr,
1793 const Value *SV, SDOperand Root,
1796 if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
1797 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
1798 L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr,
1799 DAG.getSrcValue(SV));
1801 L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SV, 0, isVolatile);
1805 DAG.setRoot(L.getValue(1));
1807 PendingLoads.push_back(L.getValue(1));
1813 void SelectionDAGLowering::visitStore(StoreInst &I) {
1814 Value *SrcV = I.getOperand(0);
1815 SDOperand Src = getValue(SrcV);
1816 SDOperand Ptr = getValue(I.getOperand(1));
1817 DAG.setRoot(DAG.getStore(getRoot(), Src, Ptr, I.getOperand(1), 0,
1821 /// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot
1822 /// access memory and has no other side effects at all.
1823 static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) {
1824 #define GET_NO_MEMORY_INTRINSICS
1825 #include "llvm/Intrinsics.gen"
1826 #undef GET_NO_MEMORY_INTRINSICS
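// The tblgen-produced fragment included above supplies the body of this
// function; roughly, it is a switch over IntrinsicID that returns true for
// intrinsics known not to touch memory and false otherwise (a sketch, not
// the verbatim generated code).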
1830 // IntrinsicOnlyReadsMemory - Return true if the specified intrinsic doesn't
1831 // have any side-effects or if it only reads memory.
1832 static bool IntrinsicOnlyReadsMemory(unsigned IntrinsicID) {
1833 #define GET_SIDE_EFFECT_INFO
1834 #include "llvm/Intrinsics.gen"
1835 #undef GET_SIDE_EFFECT_INFO
1839 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC node.
1841 void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
1842 unsigned Intrinsic) {
1843 bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic);
1844 bool OnlyLoad = HasChain && IntrinsicOnlyReadsMemory(Intrinsic);
1846 // Build the operand list.
1847 SmallVector<SDOperand, 8> Ops;
1848 if (HasChain) { // If this intrinsic has side-effects, chainify it.
1850 // We don't need to serialize loads against other loads.
1851 Ops.push_back(DAG.getRoot());
1853 Ops.push_back(getRoot());
1857 // Add the intrinsic ID as an integer operand.
1858 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
1860 // Add all operands of the call to the operand list.
1861 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
1862 SDOperand Op = getValue(I.getOperand(i));
1864 // If this is a vector type, force it to the right packed type.
1865 if (Op.getValueType() == MVT::Vector) {
1866 const PackedType *OpTy = cast<PackedType>(I.getOperand(i)->getType());
1867 MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType());
1869 MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements());
1870 assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?");
1871 Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op);
1874 assert(TLI.isTypeLegal(Op.getValueType()) &&
1875 "Intrinsic uses a non-legal type?");
1879 std::vector<MVT::ValueType> VTs;
1880 if (I.getType() != Type::VoidTy) {
1881 MVT::ValueType VT = TLI.getValueType(I.getType());
1882 if (VT == MVT::Vector) {
1883 const PackedType *DestTy = cast<PackedType>(I.getType());
1884 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
1886 VT = MVT::getVectorType(EltVT, DestTy->getNumElements());
1887 assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
1890 assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
1894 VTs.push_back(MVT::Other);
1896 const MVT::ValueType *VTList = DAG.getNodeValueTypes(VTs);
1901 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTList, VTs.size(),
1902 &Ops[0], Ops.size());
1903 else if (I.getType() != Type::VoidTy)
1904 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTList, VTs.size(),
1905 &Ops[0], Ops.size());
1907 Result = DAG.getNode(ISD::INTRINSIC_VOID, VTList, VTs.size(),
1908 &Ops[0], Ops.size());
1911 SDOperand Chain = Result.getValue(Result.Val->getNumValues()-1);
1913 PendingLoads.push_back(Chain);
1917 if (I.getType() != Type::VoidTy) {
1918 if (const PackedType *PTy = dyn_cast<PackedType>(I.getType())) {
1919 MVT::ValueType EVT = TLI.getValueType(PTy->getElementType());
1920 Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result,
1921 DAG.getConstant(PTy->getNumElements(), MVT::i32),
1922 DAG.getValueType(EVT));
1924 setValue(&I, Result);
1928 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
1929 /// we want to emit this as a call to a named external function, return the name;
1930 /// otherwise lower it and return null.
1932 SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
1933 switch (Intrinsic) {
1935 // By default, turn this into a target intrinsic node.
1936 visitTargetIntrinsic(I, Intrinsic);
1938 case Intrinsic::vastart: visitVAStart(I); return 0;
1939 case Intrinsic::vaend: visitVAEnd(I); return 0;
1940 case Intrinsic::vacopy: visitVACopy(I); return 0;
1941 case Intrinsic::returnaddress:
1942 setValue(&I, DAG.getNode(ISD::RETURNADDR, TLI.getPointerTy(),
1943 getValue(I.getOperand(1))));
1945 case Intrinsic::frameaddress:
1946 setValue(&I, DAG.getNode(ISD::FRAMEADDR, TLI.getPointerTy(),
1947 getValue(I.getOperand(1))));
1949 case Intrinsic::setjmp:
1950 return "_setjmp"+!TLI.usesUnderscoreSetJmp();
1952 case Intrinsic::longjmp:
1953 return "_longjmp"+!TLI.usesUnderscoreLongJmp();
1955 case Intrinsic::memcpy_i32:
1956 case Intrinsic::memcpy_i64:
1957 visitMemIntrinsic(I, ISD::MEMCPY);
1959 case Intrinsic::memset_i32:
1960 case Intrinsic::memset_i64:
1961 visitMemIntrinsic(I, ISD::MEMSET);
1963 case Intrinsic::memmove_i32:
1964 case Intrinsic::memmove_i64:
1965 visitMemIntrinsic(I, ISD::MEMMOVE);
1968 case Intrinsic::dbg_stoppoint: {
1969 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
1970 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
1971 if (MMI && SPI.getContext() && MMI->Verify(SPI.getContext())) {
1975 Ops[1] = getValue(SPI.getLineValue());
1976 Ops[2] = getValue(SPI.getColumnValue());
1978 DebugInfoDesc *DD = MMI->getDescFor(SPI.getContext());
1979 assert(DD && "Not a debug information descriptor");
1980 CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);
1982 Ops[3] = DAG.getString(CompileUnit->getFileName());
1983 Ops[4] = DAG.getString(CompileUnit->getDirectory());
1985 DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops, 5));
1990 case Intrinsic::dbg_region_start: {
1991 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
1992 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
1993 if (MMI && RSI.getContext() && MMI->Verify(RSI.getContext())) {
1994 unsigned LabelID = MMI->RecordRegionStart(RSI.getContext());
1995 DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other, getRoot(),
1996 DAG.getConstant(LabelID, MVT::i32)));
2001 case Intrinsic::dbg_region_end: {
2002 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
2003 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
2004 if (MMI && REI.getContext() && MMI->Verify(REI.getContext())) {
2005 unsigned LabelID = MMI->RecordRegionEnd(REI.getContext());
2006 DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other,
2007 getRoot(), DAG.getConstant(LabelID, MVT::i32)));
2012 case Intrinsic::dbg_func_start: {
2013 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
2014 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
2015 if (MMI && FSI.getSubprogram() &&
2016 MMI->Verify(FSI.getSubprogram())) {
2017 unsigned LabelID = MMI->RecordRegionStart(FSI.getSubprogram());
2018 DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other,
2019 getRoot(), DAG.getConstant(LabelID, MVT::i32)));
2024 case Intrinsic::dbg_declare: {
2025 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
2026 DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
2027 if (MMI && DI.getVariable() && MMI->Verify(DI.getVariable())) {
2028 SDOperand AddressOp = getValue(DI.getAddress());
2029 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp))
2030 MMI->RecordVariable(DI.getVariable(), FI->getIndex());
2036 case Intrinsic::sqrt_f32:
2037 case Intrinsic::sqrt_f64:
2038 setValue(&I, DAG.getNode(ISD::FSQRT,
2039 getValue(I.getOperand(1)).getValueType(),
2040 getValue(I.getOperand(1))));
2042 case Intrinsic::powi_f32:
2043 case Intrinsic::powi_f64:
2044 setValue(&I, DAG.getNode(ISD::FPOWI,
2045 getValue(I.getOperand(1)).getValueType(),
2046 getValue(I.getOperand(1)),
2047 getValue(I.getOperand(2))));
2049 case Intrinsic::pcmarker: {
2050 SDOperand Tmp = getValue(I.getOperand(1));
2051 DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
2054 case Intrinsic::readcyclecounter: {
2055 SDOperand Op = getRoot();
2056 SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER,
2057 DAG.getNodeValueTypes(MVT::i64, MVT::Other), 2,
2060 DAG.setRoot(Tmp.getValue(1));
2063 case Intrinsic::bswap_i16:
2064 case Intrinsic::bswap_i32:
2065 case Intrinsic::bswap_i64:
2066 setValue(&I, DAG.getNode(ISD::BSWAP,
2067 getValue(I.getOperand(1)).getValueType(),
2068 getValue(I.getOperand(1))));
2070 case Intrinsic::cttz_i8:
2071 case Intrinsic::cttz_i16:
2072 case Intrinsic::cttz_i32:
2073 case Intrinsic::cttz_i64:
2074 setValue(&I, DAG.getNode(ISD::CTTZ,
2075 getValue(I.getOperand(1)).getValueType(),
2076 getValue(I.getOperand(1))));
2078 case Intrinsic::ctlz_i8:
2079 case Intrinsic::ctlz_i16:
2080 case Intrinsic::ctlz_i32:
2081 case Intrinsic::ctlz_i64:
2082 setValue(&I, DAG.getNode(ISD::CTLZ,
2083 getValue(I.getOperand(1)).getValueType(),
2084 getValue(I.getOperand(1))));
2086 case Intrinsic::ctpop_i8:
2087 case Intrinsic::ctpop_i16:
2088 case Intrinsic::ctpop_i32:
2089 case Intrinsic::ctpop_i64:
2090 setValue(&I, DAG.getNode(ISD::CTPOP,
2091 getValue(I.getOperand(1)).getValueType(),
2092 getValue(I.getOperand(1))));
2094 case Intrinsic::stacksave: {
2095 SDOperand Op = getRoot();
2096 SDOperand Tmp = DAG.getNode(ISD::STACKSAVE,
2097 DAG.getNodeValueTypes(TLI.getPointerTy(), MVT::Other), 2, &Op, 1);
2099 DAG.setRoot(Tmp.getValue(1));
2102 case Intrinsic::stackrestore: {
2103 SDOperand Tmp = getValue(I.getOperand(1));
2104 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp));
2107 case Intrinsic::prefetch:
2108 // FIXME: Currently discarding prefetches.
2114 void SelectionDAGLowering::visitCall(CallInst &I) {
2115 const char *RenameFn = 0;
2116 if (Function *F = I.getCalledFunction()) {
2117 if (F->isDeclaration())
2118 if (unsigned IID = F->getIntrinsicID()) {
2119 RenameFn = visitIntrinsicCall(I, IID);
2122 } else { // Not an LLVM intrinsic.
2123 const std::string &Name = F->getName();
2124 if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) {
2125 if (I.getNumOperands() == 3 && // Basic sanity checks.
2126 I.getOperand(1)->getType()->isFloatingPoint() &&
2127 I.getType() == I.getOperand(1)->getType() &&
2128 I.getType() == I.getOperand(2)->getType()) {
2129 SDOperand LHS = getValue(I.getOperand(1));
2130 SDOperand RHS = getValue(I.getOperand(2));
2131 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(),
2135 } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
2136 if (I.getNumOperands() == 2 && // Basic sanity checks.
2137 I.getOperand(1)->getType()->isFloatingPoint() &&
2138 I.getType() == I.getOperand(1)->getType()) {
2139 SDOperand Tmp = getValue(I.getOperand(1));
2140 setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
2143 } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
2144 if (I.getNumOperands() == 2 && // Basic sanity checks.
2145 I.getOperand(1)->getType()->isFloatingPoint() &&
2146 I.getType() == I.getOperand(1)->getType()) {
2147 SDOperand Tmp = getValue(I.getOperand(1));
2148 setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
2151 } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
2152 if (I.getNumOperands() == 2 && // Basic sanity checks.
2153 I.getOperand(1)->getType()->isFloatingPoint() &&
2154 I.getType() == I.getOperand(1)->getType()) {
2155 SDOperand Tmp = getValue(I.getOperand(1));
2156 setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
2161 } else if (isa<InlineAsm>(I.getOperand(0))) {
2166 const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
2167 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
2171 Callee = getValue(I.getOperand(0));
2173 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
2174 TargetLowering::ArgListTy Args;
2175 TargetLowering::ArgListEntry Entry;
2176 Args.reserve(I.getNumOperands());
2177 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
2178 Value *Arg = I.getOperand(i);
2179 SDOperand ArgNode = getValue(Arg);
2180 Entry.Node = ArgNode; Entry.Ty = Arg->getType();
2181 Entry.isSigned = FTy->paramHasAttr(i, FunctionType::SExtAttribute);
2182 Entry.isInReg = FTy->paramHasAttr(i, FunctionType::InRegAttribute);
2183 Entry.isSRet = FTy->paramHasAttr(i, FunctionType::StructRetAttribute);
2184 Args.push_back(Entry);
2187 std::pair<SDOperand,SDOperand> Result =
2188 TLI.LowerCallTo(getRoot(), I.getType(),
2189 FTy->paramHasAttr(0,FunctionType::SExtAttribute),
2190 FTy->isVarArg(), I.getCallingConv(), I.isTailCall(),
2192 if (I.getType() != Type::VoidTy)
2193 setValue(&I, Result.first);
2194 DAG.setRoot(Result.second);
2197 SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
2198 SDOperand &Chain, SDOperand &Flag)const{
2199 SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag);
2200 Chain = Val.getValue(1);
2201 Flag = Val.getValue(2);
2203 // If the result was expanded, copy from the top part.
2204 if (Regs.size() > 1) {
2205 assert(Regs.size() == 2 &&
2206 "Cannot expand to more than 2 elts yet!");
2207 SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag);
2208 Chain = Hi.getValue(1);
2209 Flag = Hi.getValue(2);
2210 if (DAG.getTargetLoweringInfo().isLittleEndian())
2211 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi);
2213 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Hi, Val);
2216 // Otherwise, if the return value was promoted or extended, truncate it to the
2217 // appropriate type.
2218 if (RegVT == ValueVT)
2221 if (MVT::isInteger(RegVT)) {
2222 if (ValueVT < RegVT)
2223 return DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
2225 return DAG.getNode(ISD::ANY_EXTEND, ValueVT, Val);
2227 return DAG.getNode(ISD::FP_ROUND, ValueVT, Val);
2231 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
2232 /// specified value into the registers specified by this object. This uses
2233 /// Chain/Flag as the input and updates them for the output Chain/Flag.
2234 void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
2235 SDOperand &Chain, SDOperand &Flag,
2236 MVT::ValueType PtrVT) const {
2237 if (Regs.size() == 1) {
2238 // If there is a single register and the types differ, this must be a promotion.
2240 if (RegVT != ValueVT) {
2241 if (MVT::isInteger(RegVT)) {
2242 if (RegVT < ValueVT)
2243 Val = DAG.getNode(ISD::TRUNCATE, RegVT, Val);
2245 Val = DAG.getNode(ISD::ANY_EXTEND, RegVT, Val);
2247 Val = DAG.getNode(ISD::FP_EXTEND, RegVT, Val);
2249 Chain = DAG.getCopyToReg(Chain, Regs[0], Val, Flag);
2250 Flag = Chain.getValue(1);
2252 std::vector<unsigned> R(Regs);
2253 if (!DAG.getTargetLoweringInfo().isLittleEndian())
2254 std::reverse(R.begin(), R.end());
2256 for (unsigned i = 0, e = R.size(); i != e; ++i) {
2257 SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, RegVT, Val,
2258 DAG.getConstant(i, PtrVT));
2259 Chain = DAG.getCopyToReg(Chain, R[i], Part, Flag);
2260 Flag = Chain.getValue(1);
2265 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
2266 /// operand list. This adds the code marker and includes the number of
2267 /// values added into it.
2268 void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
2269 std::vector<SDOperand> &Ops) const {
2270 Ops.push_back(DAG.getConstant(Code | (Regs.size() << 3), MVT::i32));
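// The low 3 bits of this word hold the operand code and the remaining bits
// hold the register count, e.g. a REGDEF (code 2) covering two registers is
// encoded as 2 | (2 << 3) == 18.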
2271 for (unsigned i = 0, e = Regs.size(); i != e; ++i)
2272 Ops.push_back(DAG.getRegister(Regs[i], RegVT));
2275 /// isAllocatableRegister - If the specified register is safe to allocate,
2276 /// i.e. it isn't a stack pointer or some other special register, return the
2277 /// register class for the register. Otherwise, return null.
2278 static const TargetRegisterClass *
2279 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
2280 const TargetLowering &TLI, const MRegisterInfo *MRI) {
2281 MVT::ValueType FoundVT = MVT::Other;
2282 const TargetRegisterClass *FoundRC = 0;
2283 for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(),
2284 E = MRI->regclass_end(); RCI != E; ++RCI) {
2285 MVT::ValueType ThisVT = MVT::Other;
2287 const TargetRegisterClass *RC = *RCI;
2288 // If none of the value types for this register class are valid, we
2289 // can't use it. For example, 64-bit reg classes on 32-bit targets.
2290 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
2292 if (TLI.isTypeLegal(*I)) {
2293 // If we have already found this register in a different register class,
2294 // choose the one with the largest VT specified. For example, on
2295 // PowerPC, we favor f64 register classes over f32.
2296 if (FoundVT == MVT::Other ||
2297 MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) {
2304 if (ThisVT == MVT::Other) continue;
2306 // NOTE: This isn't ideal. In particular, this might allocate the
2307 // frame pointer in functions that need it (due to them not being taken
2308 // out of allocation, because a variable sized allocation hasn't been seen
2309 // yet). This is a slight code pessimization, but should still work.
2310 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
2311 E = RC->allocation_order_end(MF); I != E; ++I)
2313 // We found a matching register class. Keep looking at others in case
2314 // we find one with larger registers that this physreg is also in.
2323 RegsForValue SelectionDAGLowering::
2324 GetRegistersForValue(const std::string &ConstrCode,
2325 MVT::ValueType VT, bool isOutReg, bool isInReg,
2326 std::set<unsigned> &OutputRegs,
2327 std::set<unsigned> &InputRegs) {
2328 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
2329 TLI.getRegForInlineAsmConstraint(ConstrCode, VT);
2330 std::vector<unsigned> Regs;
2332 unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1;
2333 MVT::ValueType RegVT;
2334 MVT::ValueType ValueVT = VT;
2336 // If this is a constraint for a specific physical register, like {r17},
2338 if (PhysReg.first) {
2339 if (VT == MVT::Other)
2340 ValueVT = *PhysReg.second->vt_begin();
2342 // Get the actual register value type. This is important, because the user
2343 // may have asked for (e.g.) the AX register in i32 type. We need to
2344 // remember that AX is actually i16 to get the right extension.
2345 RegVT = *PhysReg.second->vt_begin();
2347 // This is an explicit reference to a physical register.
2348 Regs.push_back(PhysReg.first);
2350 // If this is an expanded reference, add the rest of the regs to Regs.
2352 TargetRegisterClass::iterator I = PhysReg.second->begin();
2353 TargetRegisterClass::iterator E = PhysReg.second->end();
2354 for (; *I != PhysReg.first; ++I)
2355 assert(I != E && "Didn't find reg!");
2357 // Already added the first reg.
2359 for (; NumRegs; --NumRegs, ++I) {
2360 assert(I != E && "Ran out of registers to allocate!");
2364 return RegsForValue(Regs, RegVT, ValueVT);
2367 // Otherwise, if this was a reference to an LLVM register class, create vregs
2368 // for this reference.
2369 std::vector<unsigned> RegClassRegs;
2370 if (PhysReg.second) {
2371 // If this is an early clobber or tied register, our regalloc doesn't know
2372 // how to maintain the constraint. If it isn't, go ahead and create vreg
2373 // and let the regalloc do the right thing.
2374 if (!isOutReg || !isInReg) {
2375 if (VT == MVT::Other)
2376 ValueVT = *PhysReg.second->vt_begin();
2377 RegVT = *PhysReg.second->vt_begin();
2379 // Create the appropriate number of virtual registers.
2380 SSARegMap *RegMap = DAG.getMachineFunction().getSSARegMap();
2381 for (; NumRegs; --NumRegs)
2382 Regs.push_back(RegMap->createVirtualRegister(PhysReg.second));
2384 return RegsForValue(Regs, RegVT, ValueVT);
2387 // Otherwise, we can't allocate it. Let the code below figure out how to
2388 // maintain these constraints.
2389 RegClassRegs.assign(PhysReg.second->begin(), PhysReg.second->end());
2392 // This is a reference to a register class that doesn't directly correspond
2393 // to an LLVM register class. Allocate NumRegs consecutive, available,
2394 // registers from the class.
2395 RegClassRegs = TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT);
2398 const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo();
2399 MachineFunction &MF = *CurMBB->getParent();
2400 unsigned NumAllocated = 0;
2401 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
2402 unsigned Reg = RegClassRegs[i];
2403 // See if this register is available.
2404 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
2405 (isInReg && InputRegs.count(Reg))) { // Already used.
2406 // Make sure we find consecutive registers.
2411 // Check to see if this register is allocatable (i.e. don't give out the stack pointer).
2413 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI);
2415 // Make sure we find consecutive registers.
2420 // Okay, this register is good, we can use it.
2421 ++NumAllocated;
2423 // If we allocated enough consecutive registers, we are done.
2424 if (NumAllocated == NumRegs) {
2425 unsigned RegStart = (i-NumAllocated)+1;
2426 unsigned RegEnd = i+1;
2427 // Mark all of the allocated registers used.
2428 for (unsigned i = RegStart; i != RegEnd; ++i) {
2429 unsigned Reg = RegClassRegs[i];
2430 Regs.push_back(Reg);
2431 if (isOutReg) OutputRegs.insert(Reg); // Mark reg used.
2432 if (isInReg) InputRegs.insert(Reg); // Mark reg used.
2435 return RegsForValue(Regs, *RC->vt_begin(), VT);
2439 // Otherwise, we couldn't allocate enough registers for this.
2440 return RegsForValue();
2443 /// getConstraintGenerality - Return an integer indicating how general CT is.
2444 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
2446 default: assert(0 && "Unknown constraint type!");
2447 case TargetLowering::C_Other:
2448 case TargetLowering::C_Unknown:
2450 case TargetLowering::C_Register:
2452 case TargetLowering::C_RegisterClass:
2454 case TargetLowering::C_Memory:
2459 static std::string GetMostGeneralConstraint(std::vector<std::string> &C,
2460 const TargetLowering &TLI) {
2461 assert(!C.empty() && "Must have at least one constraint");
2462 if (C.size() == 1) return C[0];
2464 std::string *Current = &C[0];
2465 // If we have multiple constraints, try to pick the most general one ahead
2466 // of time. This isn't a wonderful solution, but handles common cases.
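// For example, given the multi-letter alternative "ir", the register-class
// constraint 'r' ranks as more general than the immediate 'i' (assuming
// register-class constraints rank above "other" constraints) and is chosen.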
2467 TargetLowering::ConstraintType Flavor = TLI.getConstraintType(Current[0][0]);
2468 for (unsigned j = 1, e = C.size(); j != e; ++j) {
2469 TargetLowering::ConstraintType ThisFlavor = TLI.getConstraintType(C[j][0]);
2470 if (getConstraintGenerality(ThisFlavor) >
2471 getConstraintGenerality(Flavor)) {
2472 // This constraint letter is more general than the previous one,
2474 Flavor = ThisFlavor;
2482 /// visitInlineAsm - Handle a call to an InlineAsm object.
2484 void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
2485 InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));
2487 SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
2490 std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
2491 std::vector<MVT::ValueType> ConstraintVTs;
2493 /// AsmNodeOperands - A list of pairs. The first element is a register, the
2494 /// second is a bitfield where bit #0 is set if it is a use and bit #1 is set
2495 /// if it is a def of that register.
2496 std::vector<SDOperand> AsmNodeOperands;
2497 AsmNodeOperands.push_back(SDOperand()); // reserve space for input chain
2498 AsmNodeOperands.push_back(AsmStr);
2500 SDOperand Chain = getRoot();
2503 // We fully assign registers here at isel time. This is not optimal, but
2504 // should work. For register classes that correspond to LLVM classes, we
2505 // could let the LLVM RA do its thing, but we currently don't. Do a prepass
2506 // over the constraints, collecting fixed registers that we know we can't use.
2507 std::set<unsigned> OutputRegs, InputRegs;
2509 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
2510 std::string ConstraintCode =
2511 GetMostGeneralConstraint(Constraints[i].Codes, TLI);
2513 MVT::ValueType OpVT;
2515 // Compute the value type for each operand and add it to ConstraintVTs.
2516 switch (Constraints[i].Type) {
2517 case InlineAsm::isOutput:
2518 if (!Constraints[i].isIndirectOutput) {
2519 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2520 OpVT = TLI.getValueType(I.getType());
2522 const Type *OpTy = I.getOperand(OpNum)->getType();
2523 OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType());
2524 OpNum++; // Consumes a call operand.
2527 case InlineAsm::isInput:
2528 OpVT = TLI.getValueType(I.getOperand(OpNum)->getType());
2529 OpNum++; // Consumes a call operand.
2531 case InlineAsm::isClobber:
2536 ConstraintVTs.push_back(OpVT);
2538 if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0)
2539 continue; // Not assigned a fixed reg.
2541 // Build a list of regs that this operand uses. This always has a single
2542 // element for promoted/expanded operands.
2543 RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT,
2545 OutputRegs, InputRegs);
2547 switch (Constraints[i].Type) {
2548 case InlineAsm::isOutput:
2549 // We can't assign any other output to this register.
2550 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2551 // If this is an early-clobber output, it cannot be assigned to the same
2552 // value as the input reg.
2553 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
2554 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2556 case InlineAsm::isInput:
2557 // We can't assign any other input to this register.
2558 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2560 case InlineAsm::isClobber:
2561 // Clobbered regs cannot be used as inputs or outputs.
2562 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2563 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2568 // Loop over all of the inputs, copying the operand values into the
2569 // appropriate registers and processing the output regs.
2570 RegsForValue RetValRegs;
2571 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
2574 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
2575 std::string ConstraintCode =
2576 GetMostGeneralConstraint(Constraints[i].Codes, TLI);
2578 switch (Constraints[i].Type) {
2579 case InlineAsm::isOutput: {
2580 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2581 if (ConstraintCode.size() == 1) // not a physreg name.
2582 CTy = TLI.getConstraintType(ConstraintCode[0]);
2584 if (CTy == TargetLowering::C_Memory) {
2586 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2588 // Check that the operand (the address to store to) isn't a float.
2589 if (!MVT::isInteger(InOperandVal.getValueType()))
2590 assert(0 && "MATCH FAIL!");
2592 if (!Constraints[i].isIndirectOutput)
2593 assert(0 && "MATCH FAIL!");
2595 OpNum++; // Consumes a call operand.
2597 // Extend/truncate to the right pointer type if needed.
2598 MVT::ValueType PtrType = TLI.getPointerTy();
2599 if (InOperandVal.getValueType() < PtrType)
2600 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
2601 else if (InOperandVal.getValueType() > PtrType)
2602 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
2604 // Add information to the INLINEASM node to know about this output.
2605 unsigned ResOpType = 4/*MEM*/ | (1 << 3);
2606 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2607 AsmNodeOperands.push_back(InOperandVal);
2611 // Otherwise, this is a register output.
2612 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
2614 // If this is an early-clobber output, or if there is an input
2615 // constraint that matches this, we need to reserve the input register
2616 // so no other inputs allocate to it.
2617 bool UsesInputRegister = false;
2618 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
2619 UsesInputRegister = true;
2621 // Copy the output from the appropriate register. Find a register that we can use.
2624 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2625 true, UsesInputRegister,
2626 OutputRegs, InputRegs);
2627 if (Regs.Regs.empty()) {
2628 cerr << "Couldn't allocate output reg for constraint '"
2629 << ConstraintCode << "'!\n";
2633 if (!Constraints[i].isIndirectOutput) {
2634 assert(RetValRegs.Regs.empty() &&
2635 "Cannot have multiple output constraints yet!");
2636 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2639 IndirectStoresToEmit.push_back(std::make_pair(Regs,
2640 I.getOperand(OpNum)));
2641 OpNum++; // Consumes a call operand.
2644 // Add information to the INLINEASM node to know that this register is
2646 Regs.AddInlineAsmOperands(2 /*REGDEF*/, DAG, AsmNodeOperands);
2649 case InlineAsm::isInput: {
2650 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2651 OpNum++; // Consumes a call operand.
2653 if (isdigit(ConstraintCode[0])) { // Matching constraint?
2654 // If this is required to match an output register we have already set,
2655 // just use its register.
2656 unsigned OperandNo = atoi(ConstraintCode.c_str());
2658 // Scan until we find the definition we already emitted of this operand.
2659 // When we find it, create a RegsForValue operand.
2660 unsigned CurOp = 2; // The first operand.
2661 for (; OperandNo; --OperandNo) {
2662 // Advance to the next operand.
2664 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2665 assert(((NumOps & 7) == 2 /*REGDEF*/ ||
2666 (NumOps & 7) == 4 /*MEM*/) &&
2667 "Skipped past definitions?");
2668 CurOp += (NumOps>>3)+1;
2672 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2673 if ((NumOps & 7) == 2 /*REGDEF*/) {
2674 // Add NumOps>>3 registers to MatchedRegs.
2675 RegsForValue MatchedRegs;
2676 MatchedRegs.ValueVT = InOperandVal.getValueType();
2677 MatchedRegs.RegVT = AsmNodeOperands[CurOp+1].getValueType();
2678 for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
2680 cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
2681 MatchedRegs.Regs.push_back(Reg);
2684 // Use the produced MatchedRegs object to copy the input value into the matched registers.
2685 MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag,
2686 TLI.getPointerTy());
2687 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
2690 assert((NumOps & 7) == 4/*MEM*/ && "Unknown matching constraint!");
2691 assert(0 && "matching constraints for memory operands unimp");
2695 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2696 if (ConstraintCode.size() == 1) // not a physreg name.
2697 CTy = TLI.getConstraintType(ConstraintCode[0]);
2699 if (CTy == TargetLowering::C_Other) {
2700 InOperandVal = TLI.isOperandValidForConstraint(InOperandVal,
2701 ConstraintCode[0], DAG);
2702 if (!InOperandVal.Val) {
2703 cerr << "Invalid operand for inline asm constraint '"
2704 << ConstraintCode << "'!\n";
2708 // Add information to the INLINEASM node to know about this input.
2709 unsigned ResOpType = 3 /*IMM*/ | (1 << 3);
2710 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2711 AsmNodeOperands.push_back(InOperandVal);
2713 } else if (CTy == TargetLowering::C_Memory) {
2716 // Check that the operand isn't a float.
2717 if (!MVT::isInteger(InOperandVal.getValueType()))
2718 assert(0 && "MATCH FAIL!");
2720 // Extend/truncate to the right pointer type if needed.
2721 MVT::ValueType PtrType = TLI.getPointerTy();
2722 if (InOperandVal.getValueType() < PtrType)
2723 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
2724 else if (InOperandVal.getValueType() > PtrType)
2725 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
2727 // Add information to the INLINEASM node to know about this input.
2728 unsigned ResOpType = 4/*MEM*/ | (1 << 3);
2729 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2730 AsmNodeOperands.push_back(InOperandVal);
2734 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
2736 // Copy the input into the appropriate registers.
2737 RegsForValue InRegs =
2738 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2739 false, true, OutputRegs, InputRegs);
2740 // FIXME: should be match fail.
2741 assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!");
2743 InRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag, TLI.getPointerTy());
2745 InRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG, AsmNodeOperands);
2748 case InlineAsm::isClobber: {
2749 RegsForValue ClobberedRegs =
2750 GetRegistersForValue(ConstraintCode, MVT::Other, false, false,
2751 OutputRegs, InputRegs);
2752 // Add the clobbered value to the operand list, so that the register
2753 // allocator is aware that the physreg got clobbered.
2754 if (!ClobberedRegs.Regs.empty())
2755 ClobberedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG, AsmNodeOperands);
2761 // Finish up input operands.
2762 AsmNodeOperands[0] = Chain;
2763 if (Flag.Val) AsmNodeOperands.push_back(Flag);
2765 Chain = DAG.getNode(ISD::INLINEASM,
2766 DAG.getNodeValueTypes(MVT::Other, MVT::Flag), 2,
2767 &AsmNodeOperands[0], AsmNodeOperands.size());
2768 Flag = Chain.getValue(1);
2770 // If this asm returns a register value, copy the result from that register
2771 // and set it as the value of the call.
2772 if (!RetValRegs.Regs.empty())
2773 setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag));
2775 std::vector<std::pair<SDOperand, Value*> > StoresToEmit;
2777 // Process indirect outputs, first output all of the flagged copies out of the physregs.
2779 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
2780 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
2781 Value *Ptr = IndirectStoresToEmit[i].second;
2782 SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag);
2783 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
2786 // Emit the non-flagged stores from the physregs.
2787 SmallVector<SDOperand, 8> OutChains;
2788 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
2789 OutChains.push_back(DAG.getStore(Chain, StoresToEmit[i].first,
2790 getValue(StoresToEmit[i].second),
2791 StoresToEmit[i].second, 0));
2792 if (!OutChains.empty())
2793 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
2794 &OutChains[0], OutChains.size());
2799 void SelectionDAGLowering::visitMalloc(MallocInst &I) {
2800 SDOperand Src = getValue(I.getOperand(0));
2802 MVT::ValueType IntPtr = TLI.getPointerTy();
2804 if (IntPtr < Src.getValueType())
2805 Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
2806 else if (IntPtr > Src.getValueType())
2807 Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
2809 // Scale the source by the type size.
2810 uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType());
2811 Src = DAG.getNode(ISD::MUL, Src.getValueType(),
2812 Src, getIntPtrConstant(ElementSize));
2814 TargetLowering::ArgListTy Args;
2815 TargetLowering::ArgListEntry Entry;
2817 Entry.Ty = TLI.getTargetData()->getIntPtrType();
2818 Entry.isSigned = false;
2819 Entry.isInReg = false;
2820 Entry.isSRet = false;
2821 Args.push_back(Entry);
2823 std::pair<SDOperand,SDOperand> Result =
2824 TLI.LowerCallTo(getRoot(), I.getType(), false, false, CallingConv::C, true,
2825 DAG.getExternalSymbol("malloc", IntPtr),
2827 setValue(&I, Result.first); // Pointers always fit in registers
2828 DAG.setRoot(Result.second);
2831 void SelectionDAGLowering::visitFree(FreeInst &I) {
2832 TargetLowering::ArgListTy Args;
2833 TargetLowering::ArgListEntry Entry;
2834 Entry.Node = getValue(I.getOperand(0));
2835 Entry.Ty = TLI.getTargetData()->getIntPtrType();
2836 Entry.isSigned = false;
2837 Entry.isInReg = false;
2838 Entry.isSRet = false;
2839 Args.push_back(Entry);
2840 MVT::ValueType IntPtr = TLI.getPointerTy();
2841 std::pair<SDOperand,SDOperand> Result =
2842 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, CallingConv::C, true,
2843 DAG.getExternalSymbol("free", IntPtr), Args, DAG);
2844 DAG.setRoot(Result.second);
2847 // InsertAtEndOfBasicBlock - This method should be implemented by targets that
2848 // mark instructions with the 'usesCustomDAGSchedInserter' flag. These
2849 // instructions are special in various ways, which require special support to
2850 // insert. The specified MachineInstr is created but not inserted into any
2851 // basic blocks, and the scheduler passes ownership of it to this method.
2852 MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
2853 MachineBasicBlock *MBB) {
2854 cerr << "If a target marks an instruction with "
2855 << "'usesCustomDAGSchedInserter', it must implement "
2856 << "TargetLowering::InsertAtEndOfBasicBlock!\n";
2861 void SelectionDAGLowering::visitVAStart(CallInst &I) {
2862 DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(),
2863 getValue(I.getOperand(1)),
2864 DAG.getSrcValue(I.getOperand(1))));
2867 void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
2868 SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
2869 getValue(I.getOperand(0)),
2870 DAG.getSrcValue(I.getOperand(0)));
2872 DAG.setRoot(V.getValue(1));
2875 void SelectionDAGLowering::visitVAEnd(CallInst &I) {
2876 DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(),
2877 getValue(I.getOperand(1)),
2878 DAG.getSrcValue(I.getOperand(1))));
2881 void SelectionDAGLowering::visitVACopy(CallInst &I) {
2882 DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(),
2883 getValue(I.getOperand(1)),
2884 getValue(I.getOperand(2)),
2885 DAG.getSrcValue(I.getOperand(1)),
2886 DAG.getSrcValue(I.getOperand(2))));
2889 /// ExpandScalarFormalArgs - Recursively expand the formal_argument node, either
2890 /// bit_converting it or joining a pair of expanded pieces with a BUILD_PAIR when appropriate.
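/// For example, an i64 formal argument on a target that must expand i64 into
/// two i32 pieces is rebuilt here as BUILD_PAIR(Lo, Hi) from two consecutive
/// FORMAL_ARGUMENTS results, with the pieces swapped on big-endian targets.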
2891 static SDOperand ExpandScalarFormalArgs(MVT::ValueType VT, SDNode *Arg,
2892 unsigned &i, SelectionDAG &DAG,
2893 TargetLowering &TLI) {
2894 if (TLI.getTypeAction(VT) != TargetLowering::Expand)
2895 return SDOperand(Arg, i++);
2897 MVT::ValueType EVT = TLI.getTypeToTransformTo(VT);
2898 unsigned NumVals = MVT::getSizeInBits(VT) / MVT::getSizeInBits(EVT);
2900 return DAG.getNode(ISD::BIT_CONVERT, VT,
2901 ExpandScalarFormalArgs(EVT, Arg, i, DAG, TLI));
2902 } else if (NumVals == 2) {
2903 SDOperand Lo = ExpandScalarFormalArgs(EVT, Arg, i, DAG, TLI);
2904 SDOperand Hi = ExpandScalarFormalArgs(EVT, Arg, i, DAG, TLI);
2905 if (!TLI.isLittleEndian())
2906 std::swap(Lo, Hi);
2907 return DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi);
2909 // Value scalarized into many values. Unimp for now.
2910 assert(0 && "Cannot expand i64 -> i16 yet!");
2915 /// TargetLowering::LowerArguments - This is the default LowerArguments
2916 /// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
2917 /// targets are migrated to using FORMAL_ARGUMENTS, this hook should be
2918 /// integrated into SDISel.
2919 std::vector<SDOperand>
2920 TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
2921 const FunctionType *FTy = F.getFunctionType();
2922 // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
2923 std::vector<SDOperand> Ops;
2924 Ops.push_back(DAG.getRoot());
2925 Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
2926 Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));
2928 // Add one result value for each formal argument.
2929 std::vector<MVT::ValueType> RetVals;
2931 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
2933 MVT::ValueType VT = getValueType(I->getType());
2934 bool isInReg = FTy->paramHasAttr(j, FunctionType::InRegAttribute);
2935 bool isSRet = FTy->paramHasAttr(j, FunctionType::StructRetAttribute);
2936 unsigned OriginalAlignment =
2937 getTargetData()->getABITypeAlignment(I->getType());
2938 // Flags[31:27] -> OriginalAlignment
2939 // Flags[2] -> isSRet
2940 // Flags[1] -> isInReg
2941 unsigned Flags = (isInReg << 1) | (isSRet << 2) | (OriginalAlignment << 27);
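// Worked example: an sret pointer argument with 4-byte ABI alignment and no
// inreg attribute packs as (0 << 1) | (1 << 2) | (4 << 27) == 0x20000004.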
2943 switch (getTypeAction(VT)) {
2944 default: assert(0 && "Unknown type action!");
2946 RetVals.push_back(VT);
2947 Ops.push_back(DAG.getConstant(Flags, MVT::i32));
2950 RetVals.push_back(getTypeToTransformTo(VT));
2951 Ops.push_back(DAG.getConstant(Flags, MVT::i32));
2954 if (VT != MVT::Vector) {
2955 // If this is a large integer, it needs to be broken up into small
2956 // integers. Figure out what the destination type is and how many small
2957 // integers it turns into.
2958 MVT::ValueType NVT = getTypeToExpandTo(VT);
2959 unsigned NumVals = getNumElements(VT);
2960 for (unsigned i = 0; i != NumVals; ++i) {
2961 RetVals.push_back(NVT);
2962 // If it isn't the first piece, alignment must be 1.
2963 if (i == 1) Flags = (Flags & 0x07ffffff) | (1 << 27);
2964 Ops.push_back(DAG.getConstant(Flags, MVT::i32));
2967 // Otherwise, this is a vector type. We only support legal vector types here.
2969 unsigned NumElems = cast<PackedType>(I->getType())->getNumElements();
2970 const Type *EltTy = cast<PackedType>(I->getType())->getElementType();
2972 // Figure out if there is a Packed type corresponding to this Vector
2973 // type. If so, convert to the packed type.
2974 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2975 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2976 RetVals.push_back(TVT);
2977 Ops.push_back(DAG.getConstant(Flags, MVT::i32));
2979 assert(0 && "Don't support illegal by-val vector arguments yet!");
2986 RetVals.push_back(MVT::Other);
2989 SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS,
2990 DAG.getNodeValueTypes(RetVals), RetVals.size(),
2991 &Ops[0], Ops.size()).Val;
2993 DAG.setRoot(SDOperand(Result, Result->getNumValues()-1));
2995 // Set up the return result vector.
2999 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
3001 MVT::ValueType VT = getValueType(I->getType());
3003 switch (getTypeAction(VT)) {
3004 default: assert(0 && "Unknown type action!");
3006 Ops.push_back(SDOperand(Result, i++));
3009 SDOperand Op(Result, i++);
3010 if (MVT::isInteger(VT)) {
3011 if (FTy->paramHasAttr(Idx, FunctionType::SExtAttribute))
3012 Op = DAG.getNode(ISD::AssertSext, Op.getValueType(), Op,
3013 DAG.getValueType(VT));
3014 else if (FTy->paramHasAttr(Idx, FunctionType::ZExtAttribute))
3015 Op = DAG.getNode(ISD::AssertZext, Op.getValueType(), Op,
3016 DAG.getValueType(VT));
3017 Op = DAG.getNode(ISD::TRUNCATE, VT, Op);
3019 assert(MVT::isFloatingPoint(VT) && "Not int or FP?");
3020 Op = DAG.getNode(ISD::FP_ROUND, VT, Op);
3026 if (VT != MVT::Vector) {
3027 // If this is a large integer or a floating point node that needs to be
3028 // expanded, it needs to be reassembled from small integers. Figure out
3029 // what the source elt type is and how many small integers it is.
3030 Ops.push_back(ExpandScalarFormalArgs(VT, Result, i, DAG, *this));
3032 // Otherwise, this is a vector type. We only support legal vector types here.
3034 const PackedType *PTy = cast<PackedType>(I->getType());
3035 unsigned NumElems = PTy->getNumElements();
3036 const Type *EltTy = PTy->getElementType();
3038 // Figure out if there is a Packed type corresponding to this Vector
3039 // type. If so, convert to the packed type.
3040 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
3041 if (TVT != MVT::Other && isTypeLegal(TVT)) {
3042 SDOperand N = SDOperand(Result, i++);
3043 // Handle copies from generic vectors to registers.
3044 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
3045 DAG.getConstant(NumElems, MVT::i32),
3046 DAG.getValueType(getValueType(EltTy)));
3049 assert(0 && "Don't support illegal by-val vector arguments yet!");
3060 /// ExpandScalarCallArgs - Recursively expand call argument node by
3061 /// bit_converting it or extracting a pair of elements from the larger node.
3062 static void ExpandScalarCallArgs(MVT::ValueType VT, SDOperand Arg,
3064 SmallVector<SDOperand, 32> &Ops,
3066 TargetLowering &TLI,
3067 bool isFirst = true) {
3069 if (TLI.getTypeAction(VT) != TargetLowering::Expand) {
3070 // If it isn't the first piece, alignment must be 1.
3071 if (!isFirst)
3072 Flags = (Flags & 0x07ffffff) | (1 << 27);
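// The mask 0x07ffffff keeps bits [26:0] (the attribute flags) and clears the
// alignment field in bits [31:27], which is then rewritten to 1.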
3074 Ops.push_back(DAG.getConstant(Flags, MVT::i32));
3078 MVT::ValueType EVT = TLI.getTypeToTransformTo(VT);
3079 unsigned NumVals = MVT::getSizeInBits(VT) / MVT::getSizeInBits(EVT);
3081 Arg = DAG.getNode(ISD::BIT_CONVERT, EVT, Arg);
3082 ExpandScalarCallArgs(EVT, Arg, Flags, Ops, DAG, TLI, isFirst);
3083 } else if (NumVals == 2) {
3084 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, EVT, Arg,
3085 DAG.getConstant(0, TLI.getPointerTy()));
3086 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, EVT, Arg,
3087 DAG.getConstant(1, TLI.getPointerTy()));
3088 if (!TLI.isLittleEndian())
3089 std::swap(Lo, Hi);
3090 ExpandScalarCallArgs(EVT, Lo, Flags, Ops, DAG, TLI, isFirst);
3091 ExpandScalarCallArgs(EVT, Hi, Flags, Ops, DAG, TLI, false);
3093 // Value scalarized into many values. Unimp for now.
3094 assert(0 && "Cannot expand i64 -> i16 yet!");
3098 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
3099 /// implementation, which just inserts an ISD::CALL node, which is later custom
3100 /// lowered by the target to something concrete. FIXME: When all targets are
3101 /// migrated to using ISD::CALL, this hook should be integrated into SDISel.
3102 std::pair<SDOperand, SDOperand>
3103 TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
3104 bool RetTyIsSigned, bool isVarArg,
3105 unsigned CallingConv, bool isTailCall,
3107 ArgListTy &Args, SelectionDAG &DAG) {
3108 SmallVector<SDOperand, 32> Ops;
3109 Ops.push_back(Chain); // Op#0 - Chain
3110 Ops.push_back(DAG.getConstant(CallingConv, getPointerTy())); // Op#1 - CC
3111 Ops.push_back(DAG.getConstant(isVarArg, getPointerTy())); // Op#2 - VarArg
3112 Ops.push_back(DAG.getConstant(isTailCall, getPointerTy())); // Op#3 - Tail
3113 Ops.push_back(Callee);
3115 // Handle all of the outgoing arguments.
3116 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
3117 MVT::ValueType VT = getValueType(Args[i].Ty);
3118 SDOperand Op = Args[i].Node;
3119 bool isSigned = Args[i].isSigned;
3120 bool isInReg = Args[i].isInReg;
3121 bool isSRet = Args[i].isSRet;
3122 unsigned OriginalAlignment =
3123 getTargetData()->getABITypeAlignment(Args[i].Ty);
3124 // Flags[31:27] -> OriginalAlignment
3125 // Flags[2] -> isSRet
3126 // Flags[1] -> isInReg
3127 // Flags[0] -> isSigned
3128 unsigned Flags = (isSRet << 2) | (isInReg << 1) | isSigned |
3129 (OriginalAlignment << 27);
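// Same packing as in LowerArguments, plus the isSigned bit in bit 0; e.g. a
// signed argument with 1-byte alignment and no other attributes packs as
// 1 | (1 << 27) == 0x08000001.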
3131 switch (getTypeAction(VT)) {
3132 default: assert(0 && "Unknown type action!");
3135 Ops.push_back(DAG.getConstant(Flags, MVT::i32));
3138 if (MVT::isInteger(VT)) {
3139 unsigned ExtOp = isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3140 Op = DAG.getNode(ExtOp, getTypeToTransformTo(VT), Op);
3142 assert(MVT::isFloatingPoint(VT) && "Not int or FP?");
3143 Op = DAG.getNode(ISD::FP_EXTEND, getTypeToTransformTo(VT), Op);
3146 Ops.push_back(DAG.getConstant(Flags, MVT::i32));
3149 if (VT != MVT::Vector) {
3150 // If this is a large integer, it needs to be broken down into small
3151 // integers. Figure out what the source elt type is and how many small integers it turns into.
3153 ExpandScalarCallArgs(VT, Op, Flags, Ops, DAG, *this);
3155 // Otherwise, this is a vector type. We only support legal vector types here.
3157 const PackedType *PTy = cast<PackedType>(Args[i].Ty);
3158 unsigned NumElems = PTy->getNumElements();
3159 const Type *EltTy = PTy->getElementType();
3161 // Figure out if there is a Packed type corresponding to this Vector
3162 // type. If so, convert to the packed type.
3163 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
3164 if (TVT != MVT::Other && isTypeLegal(TVT)) {
3165 // Insert a VBIT_CONVERT of the MVT::Vector type to the packed type.
3166 Op = DAG.getNode(ISD::VBIT_CONVERT, TVT, Op);
3168 Ops.push_back(DAG.getConstant(Flags, MVT::i32));
3170 assert(0 && "Don't support illegal by-val vector call args yet!");
3178 // Figure out the result value types.
3179 SmallVector<MVT::ValueType, 4> RetTys;
3181 if (RetTy != Type::VoidTy) {
3182 MVT::ValueType VT = getValueType(RetTy);
3183 switch (getTypeAction(VT)) {
3184 default: assert(0 && "Unknown type action!");
3186 RetTys.push_back(VT);
3189 RetTys.push_back(getTypeToTransformTo(VT));
3192 if (VT != MVT::Vector) {
3193 // If this is a large integer, it needs to be reassembled from small
3194 // integers; figure out the source element type and how many pieces it takes.
3196 MVT::ValueType NVT = getTypeToExpandTo(VT);
3197 unsigned NumVals = getNumElements(VT);
3198 for (unsigned i = 0; i != NumVals; ++i)
3199 RetTys.push_back(NVT);
3201 // Otherwise, this is a vector type. We only support legal vectors right now.
3203 const PackedType *PTy = cast<PackedType>(RetTy);
3204 unsigned NumElems = PTy->getNumElements();
3205 const Type *EltTy = PTy->getElementType();
3207 // Figure out if there is a Packed type corresponding to this Vector
3208 // type. If so, convert to the packed type.
3209 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
3210 if (TVT != MVT::Other && isTypeLegal(TVT)) {
3211 RetTys.push_back(TVT);
3213 assert(0 && "Don't support illegal by-val vector call results yet!");
3220 RetTys.push_back(MVT::Other); // Always has a chain.
3222 // Finally, create the CALL node.
3223 SDOperand Res = DAG.getNode(ISD::CALL,
3224 DAG.getVTList(&RetTys[0], RetTys.size()),
3225 &Ops[0], Ops.size());
3227 // This returns a pair of operands. The first element is the
3228 // return value for the function (if RetTy is not VoidTy). The second
3229 // element is the outgoing token chain.
3231 if (RetTys.size() != 1) {
3232 MVT::ValueType VT = getValueType(RetTy);
3233 if (RetTys.size() == 2) {
3236 // If this value was promoted, truncate it down.
3237 if (ResVal.getValueType() != VT) {
3238 if (VT == MVT::Vector) {
3239 // Insert a VBITCONVERT to convert from the packed result type to the
3240 // MVT::Vector type.
3241 unsigned NumElems = cast<PackedType>(RetTy)->getNumElements();
3242 const Type *EltTy = cast<PackedType>(RetTy)->getElementType();
3244 // Figure out if there is a Packed type corresponding to this Vector
3245 // type. If so, convert to the packed type.
3246 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy),NumElems);
3247 if (TVT != MVT::Other && isTypeLegal(TVT)) {
3248 // Insert a VBIT_CONVERT of the call result to a
3249 // "N x PTyElementVT" MVT::Vector type.
3250 ResVal = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, ResVal,
3251 DAG.getConstant(NumElems, MVT::i32),
3252 DAG.getValueType(getValueType(EltTy)));
3256 } else if (MVT::isInteger(VT)) {
3257 unsigned AssertOp = ISD::AssertSext;
3258 if (!RetTyIsSigned)
3259 AssertOp = ISD::AssertZext;
3260 ResVal = DAG.getNode(AssertOp, ResVal.getValueType(), ResVal,
3261 DAG.getValueType(VT));
3262 ResVal = DAG.getNode(ISD::TRUNCATE, VT, ResVal);
3264 assert(MVT::isFloatingPoint(VT));
3265 if (getTypeAction(VT) == Expand)
3266 ResVal = DAG.getNode(ISD::BIT_CONVERT, VT, ResVal);
3267 else
3268 ResVal = DAG.getNode(ISD::FP_ROUND, VT, ResVal);
3271 } else if (RetTys.size() == 3) {
3272 ResVal = DAG.getNode(ISD::BUILD_PAIR, VT,
3273 Res.getValue(0), Res.getValue(1));
3276 assert(0 && "Case not handled yet!");
3280 return std::make_pair(ResVal, Res.getValue(Res.Val->getNumValues()-1));
3283 SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
3284 assert(0 && "LowerOperation not implemented for this target!");
3289 SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op,
3290 SelectionDAG &DAG) {
3291 assert(0 && "CustomPromoteOperation not implemented for this target!");
3296 /// getMemsetValue - Build a value of type VT in which every byte equals the
3297 /// low byte of the memset value operand.
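/// For a constant byte like 0xAB and VT = MVT::i32, for instance, this should
/// produce the constant 0xABABABAB; a non-constant value is replicated the same
/// way with shifts and ORs.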
3298 static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
3299 SelectionDAG &DAG) {
3300 MVT::ValueType CurVT = VT;
3301 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3302 uint64_t Val = C->getValue() & 255;
3304 while (CurVT != MVT::i8) {
3305 Val = (Val << Shift) | Val;
3307 CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
3309 return DAG.getConstant(Val, VT);
3311 Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
3313 while (CurVT != MVT::i8) {
3315 DAG.getNode(ISD::OR, VT,
3316 DAG.getNode(ISD::SHL, VT, Value,
3317 DAG.getConstant(Shift, MVT::i8)), Value);
3319 CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
3326 /// getMemsetStringVal - Similar to getMemsetValue, except this is only used
3327 /// when a memcpy is turned into a memset because the source is a constant string.
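/// For instance, with Str = "abcd", Offset = 0 and VT = MVT::i32 this should
/// yield 0x61626364 on a big-endian target and 0x64636261 on a little-endian
/// one, so the bytes land in memory in string order either way.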
3329 static SDOperand getMemsetStringVal(MVT::ValueType VT,
3330 SelectionDAG &DAG, TargetLowering &TLI,
3331 std::string &Str, unsigned Offset) {
3332 uint64_t Val = 0;
3333 unsigned MSB = getSizeInBits(VT) / 8;
3334 if (TLI.isLittleEndian())
3335 Offset = Offset + MSB - 1;
3336 for (unsigned i = 0; i != MSB; ++i) {
3337 Val = (Val << 8) | (unsigned char)Str[Offset];
3338 Offset += TLI.isLittleEndian() ? -1 : 1;
3340 return DAG.getConstant(Val, VT);
3343 /// getMemBasePlusOffset - Returns a node computing Base plus the constant Offset.
3344 static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
3345 SelectionDAG &DAG, TargetLowering &TLI) {
3346 MVT::ValueType VT = Base.getValueType();
3347 return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
3350 /// MeetsMaxMemopRequirement - Determines if the number of memory ops required
3351 /// to replace the memset / memcpy is below the threshold. It also returns the
3352 /// types of the sequence of memory ops to perform memset / memcpy.
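/// For example, a 15-byte memset on a target where i64 is legal would typically
/// be covered by one i64, one i32, one i16 and one i8 store (four ops), and so
/// succeeds whenever Limit is at least 4.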
3353 static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
3354 unsigned Limit, uint64_t Size,
3355 unsigned Align, TargetLowering &TLI) {
3358 if (TLI.allowsUnalignedMemoryAccesses()) {
3361 switch (Align & 7) {
3377 MVT::ValueType LVT = MVT::i64;
3378 while (!TLI.isTypeLegal(LVT))
3379 LVT = (MVT::ValueType)((unsigned)LVT - 1);
3380 assert(MVT::isInteger(LVT));
3385 unsigned NumMemOps = 0;
3387 unsigned VTSize = getSizeInBits(VT) / 8;
3388 while (VTSize > Size) {
3389 VT = (MVT::ValueType)((unsigned)VT - 1);
3392 assert(MVT::isInteger(VT));
3394 if (++NumMemOps > Limit)
3396 MemOps.push_back(VT);
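/// visitMemIntrinsic - Lower a memset/memcpy/memmove intrinsic call. When the
/// size is a small enough constant, the operation is expanded right here into
/// straight-line stores (and loads for memcpy); otherwise a single node with
/// the given Op opcode is emitted for the target to lower.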
3403 void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
3404 SDOperand Op1 = getValue(I.getOperand(1));
3405 SDOperand Op2 = getValue(I.getOperand(2));
3406 SDOperand Op3 = getValue(I.getOperand(3));
3407 SDOperand Op4 = getValue(I.getOperand(4));
3408 unsigned Align = (unsigned)cast<ConstantSDNode>(Op4)->getValue();
3409 if (Align == 0) Align = 1;
3411 if (ConstantSDNode *Size = dyn_cast<ConstantSDNode>(Op3)) {
3412 std::vector<MVT::ValueType> MemOps;
3414 // Expand memset / memcpy to a series of load / store ops
3415 // if the size operand falls below a certain threshold.
3416 SmallVector<SDOperand, 8> OutChains;
3418 default: break; // Do nothing for now.
3420 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(),
3421 Size->getValue(), Align, TLI)) {
3422 unsigned NumMemOps = MemOps.size();
3423 unsigned Offset = 0;
3424 for (unsigned i = 0; i < NumMemOps; i++) {
3425 MVT::ValueType VT = MemOps[i];
3426 unsigned VTSize = getSizeInBits(VT) / 8;
3427 SDOperand Value = getMemsetValue(Op2, VT, DAG);
3428 SDOperand Store = DAG.getStore(getRoot(), Value,
3429 getMemBasePlusOffset(Op1, Offset, DAG, TLI),
3430 I.getOperand(1), Offset);
3431 OutChains.push_back(Store);
3438 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(),
3439 Size->getValue(), Align, TLI)) {
3440 unsigned NumMemOps = MemOps.size();
3441 unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0;
3442 GlobalAddressSDNode *G = NULL;
3443 std::string Str;
3444 bool CopyFromStr = false;
3446 if (Op2.getOpcode() == ISD::GlobalAddress)
3447 G = cast<GlobalAddressSDNode>(Op2);
3448 else if (Op2.getOpcode() == ISD::ADD &&
3449 Op2.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3450 Op2.getOperand(1).getOpcode() == ISD::Constant) {
3451 G = cast<GlobalAddressSDNode>(Op2.getOperand(0));
3452 SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue();
3455 GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
3456 if (GV && GV->isConstant()) {
3457 Str = GV->getStringValue(false);
3465 for (unsigned i = 0; i < NumMemOps; i++) {
3466 MVT::ValueType VT = MemOps[i];
3467 unsigned VTSize = getSizeInBits(VT) / 8;
3468 SDOperand Value, Chain, Store;
3471 Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff);
3474 DAG.getStore(Chain, Value,
3475 getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
3476 I.getOperand(1), DstOff);
3478 Value = DAG.getLoad(VT, getRoot(),
3479 getMemBasePlusOffset(Op2, SrcOff, DAG, TLI),
3480 I.getOperand(2), SrcOff);
3481 Chain = Value.getValue(1);
3483 DAG.getStore(Chain, Value,
3484 getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
3485 I.getOperand(1), DstOff);
3487 OutChains.push_back(Store);
3496 if (!OutChains.empty()) {
3497 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other,
3498 &OutChains[0], OutChains.size()));
3503 DAG.setRoot(DAG.getNode(Op, MVT::Other, getRoot(), Op1, Op2, Op3, Op4));
3506 //===----------------------------------------------------------------------===//
3507 // SelectionDAGISel code
3508 //===----------------------------------------------------------------------===//
3510 unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
3511 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
3514 void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
3515 // FIXME: we only modify the CFG to split critical edges. This
3516 // updates dom and loop info.
3517 AU.addRequired<AliasAnalysis>();
3521 /// OptimizeNoopCopyExpression - We have determined that the specified cast
3522 /// instruction is a noop copy (e.g. it's casting from one pointer type to
3523 /// another, int->uint, or int->sbyte on PPC).
3525 /// Return true if any changes are made.
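/// The rewrite is simple: for every block that uses the cast, a copy of the
/// cast is inserted at the top of that block (after any PHIs) and the uses in
/// that block are redirected to it, so the casted value no longer has to live
/// across blocks in a virtual register.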
3526 static bool OptimizeNoopCopyExpression(CastInst *CI) {
3527 BasicBlock *DefBB = CI->getParent();
3529 /// InsertedCasts - Only insert a cast in each block once.
3530 std::map<BasicBlock*, CastInst*> InsertedCasts;
3532 bool MadeChange = false;
3533 for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
3535 Use &TheUse = UI.getUse();
3536 Instruction *User = cast<Instruction>(*UI);
3538 // Figure out which BB this cast is used in. For PHI's this is the
3539 // appropriate predecessor block.
3540 BasicBlock *UserBB = User->getParent();
3541 if (PHINode *PN = dyn_cast<PHINode>(User)) {
3542 unsigned OpVal = UI.getOperandNo()/2;
3543 UserBB = PN->getIncomingBlock(OpVal);
3546 // Preincrement use iterator so we don't invalidate it.
3549 // If this user is in the same block as the cast, don't change the cast.
3550 if (UserBB == DefBB) continue;
3552 // If we have already inserted a cast into this block, use it.
3553 CastInst *&InsertedCast = InsertedCasts[UserBB];
3555 if (!InsertedCast) {
3556 BasicBlock::iterator InsertPt = UserBB->begin();
3557 while (isa<PHINode>(InsertPt)) ++InsertPt;
3560 CastInst::create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
3565 // Replace a use of the cast with a use of the new cast.
3566 TheUse = InsertedCast;
3569 // If we removed all uses, nuke the cast.
3570 if (CI->use_empty())
3571 CI->eraseFromParent();
3576 /// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
3577 /// casting to the type of GEPI.
3578 static Instruction *InsertGEPComputeCode(Instruction *&V, BasicBlock *BB,
3579 Instruction *GEPI, Value *Ptr,
3580 Constant *PtrOffset) {
3581 if (V) return V; // Already computed.
3583 // Figure out the insertion point
3584 BasicBlock::iterator InsertPt;
3585 if (BB == GEPI->getParent()) {
3586 // If GEP is already inserted into BB, insert right after the GEP.
3590 // Otherwise, insert at the top of BB, after any PHI nodes
3591 InsertPt = BB->begin();
3592 while (isa<PHINode>(InsertPt)) ++InsertPt;
3595 // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
3596 // BB so that there is only one value live across basic blocks (the cast computed in BB).
3598 if (CastInst *CI = dyn_cast<CastInst>(Ptr))
3599 if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
3600 Ptr = CastInst::create(CI->getOpcode(), CI->getOperand(0), CI->getType(),
3603 // Add the offset, cast it to the right type.
3604 Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
3605 // Ptr is an integer type, GEPI is pointer type ==> IntToPtr
3606 return V = CastInst::create(Instruction::IntToPtr, Ptr, GEPI->getType(),
3610 /// ReplaceUsesOfGEPInst - Replace all uses of RepPtr with inserted code to
3611 /// compute its value. The RepPtr value can be computed with Ptr+PtrOffset. One
3612 /// trivial way of doing this would be to evaluate Ptr+PtrOffset in RepPtr's
3613 /// block, then ReplaceAllUsesWith'ing everything. However, we would prefer to
3614 /// sink PtrOffset into user blocks where doing so will likely allow us to fold
3615 /// the constant add into a load or store instruction. Additionally, if a user
3616 /// is a pointer-pointer cast, we look through it to find its users.
3617 static void ReplaceUsesOfGEPInst(Instruction *RepPtr, Value *Ptr,
3618 Constant *PtrOffset, BasicBlock *DefBB,
3619 GetElementPtrInst *GEPI,
3620 std::map<BasicBlock*,Instruction*> &InsertedExprs) {
3621 while (!RepPtr->use_empty()) {
3622 Instruction *User = cast<Instruction>(RepPtr->use_back());
3624 // If the user is a Pointer-Pointer cast, recurse. Only BitCast can be
3625 // used for a Pointer-Pointer cast.
3626 if (isa<BitCastInst>(User)) {
3627 ReplaceUsesOfGEPInst(User, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
3629 // Drop the use of RepPtr. The cast is dead. Don't delete it now, else we
3630 // could invalidate an iterator.
3631 User->setOperand(0, UndefValue::get(RepPtr->getType()));
3635 // If this is a load of the pointer, or a store through the pointer, emit
3636 // the increment into the load/store block.
3637 Instruction *NewVal;
3638 if (isa<LoadInst>(User) ||
3639 (isa<StoreInst>(User) && User->getOperand(0) != RepPtr)) {
3640 NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
3641 User->getParent(), GEPI,
3644 // If this use is not foldable into the addressing mode, use a version
3645 // emitted in the GEP block.
3646 NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
3650 if (GEPI->getType() != RepPtr->getType()) {
3651 BasicBlock::iterator IP = NewVal;
3653 // NewVal has the GEP's pointer type, so a BitCast is enough to match RepPtr's type.
3654 NewVal = new BitCastInst(NewVal, RepPtr->getType(), "", IP);
3656 User->replaceUsesOfWith(RepPtr, NewVal);
3661 /// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
3662 /// selection, we want to be a bit careful about some things. In particular, if
3663 /// we have a GEP instruction that is used in a different block than it is
3664 /// defined, the addressing expression of the GEP cannot be folded into loads or
3665 /// stores that use it. In this case, decompose the GEP and move constant
3666 /// indices into blocks that use it.
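/// A rough sketch of the rewrite (types and offsets illustrative only):
///   DefBB:  %g = getelementptr %struct.S* %p, i32 0, i32 2   ; byte offset 8
///   UseBB:  %v = load i32* %g
/// becomes
///   DefBB:  %t = ptrtoint %struct.S* %p to i64
///   UseBB:  %a = add i64 %t, 8
///           %c = inttoptr i64 %a to i32*
///           %v = load i32* %c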
3667 static bool OptimizeGEPExpression(GetElementPtrInst *GEPI,
3668 const TargetData *TD) {
3669 // If this GEP is only used inside the block it is defined in, there is no
3670 // need to rewrite it.
3671 bool isUsedOutsideDefBB = false;
3672 BasicBlock *DefBB = GEPI->getParent();
3673 for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
3675 if (cast<Instruction>(*UI)->getParent() != DefBB) {
3676 isUsedOutsideDefBB = true;
3680 if (!isUsedOutsideDefBB) return false;
3682 // If this GEP has no non-zero constant indices, there is nothing we can do; ignore it.
3684 bool hasConstantIndex = false;
3685 bool hasVariableIndex = false;
3686 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
3687 E = GEPI->op_end(); OI != E; ++OI) {
3688 if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI)) {
3689 if (CI->getZExtValue()) {
3690 hasConstantIndex = true;
3694 hasVariableIndex = true;
3698 // If this is a "GEP X, 0, 0, 0", turn this into a cast.
3699 if (!hasConstantIndex && !hasVariableIndex) {
3700 // The GEP operand must be a pointer, as must its result -> BitCast.
3701 Value *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
3702 GEPI->getName(), GEPI);
3703 GEPI->replaceAllUsesWith(NC);
3704 GEPI->eraseFromParent();
3708 // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
3709 if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0)))
3712 // Otherwise, decompose the GEP instruction into multiplies and adds. Sum the
3713 // constant offset (which we now know is non-zero) and deal with it later.
3714 uint64_t ConstantOffset = 0;
3715 const Type *UIntPtrTy = TD->getIntPtrType();
3716 Value *Ptr = new PtrToIntInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
3717 const Type *Ty = GEPI->getOperand(0)->getType();
3719 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
3720 E = GEPI->op_end(); OI != E; ++OI) {
3722 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
3723 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
3725 ConstantOffset += TD->getStructLayout(StTy)->getElementOffset(Field);
3726 Ty = StTy->getElementType(Field);
3728 Ty = cast<SequentialType>(Ty)->getElementType();
3730 // Handle constant subscripts.
3731 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
3732 if (CI->getZExtValue() == 0) continue;
3733 ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CI->getSExtValue();
3737 // Ptr = Ptr + Idx * ElementSize;
3739 // Cast Idx to UIntPtrTy if needed.
3740 Idx = CastInst::createIntegerCast(Idx, UIntPtrTy, true/*SExt*/, "", GEPI);
3742 uint64_t ElementSize = TD->getTypeSize(Ty);
3743 // Mask off bits that should not be set.
3744 ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
3745 Constant *SizeCst = ConstantInt::get(UIntPtrTy, ElementSize);
3747 // Multiply by the element size and add to the base.
3748 Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
3749 Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
3753 // Make sure that the offset fits in uintptr_t.
3754 ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
3755 Constant *PtrOffset = ConstantInt::get(UIntPtrTy, ConstantOffset);
3757 // Okay, we have now emitted all of the variable index parts to the BB that
3758 // the GEP is defined in. Loop over all of the using instructions, inserting
3759 // an "add Ptr, ConstantOffset" into each block that uses it and update the
3760 // instruction to use the newly computed value, making GEPI dead. When the
3761 // user is a load or store instruction address, we emit the add into the user
3762 // block, otherwise we use a canonical version right next to the gep (these
3763 // won't be foldable as addresses, so we might as well share the computation).
3765 std::map<BasicBlock*,Instruction*> InsertedExprs;
3766 ReplaceUsesOfGEPInst(GEPI, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
3768 // Finally, the GEP is dead, remove it.
3769 GEPI->eraseFromParent();
3775 /// SplitEdgeNicely - Split the critical edge from TI to its specified
3776 /// successor if it will improve codegen. We only do this if the successor has
3777 /// phi nodes (otherwise critical edges are ok). If there is already another
3778 /// predecessor of the succ that is empty (and thus has no phi nodes), use it
3779 /// instead of introducing a new block.
3780 static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum, Pass *P) {
3781 BasicBlock *TIBB = TI->getParent();
3782 BasicBlock *Dest = TI->getSuccessor(SuccNum);
3783 assert(isa<PHINode>(Dest->begin()) &&
3784 "This should only be called if Dest has a PHI!");
3786 /// TIPHIValues - This array is lazily computed to determine the values of
3787 /// PHIs in Dest that TI would provide.
3788 std::vector<Value*> TIPHIValues;
3790 // Check to see if Dest has any blocks that can be used as a split edge for this TI.
3792 for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
3793 BasicBlock *Pred = *PI;
3794 // To be usable, the pred has to end with an uncond branch to the dest.
3795 BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
3796 if (!PredBr || !PredBr->isUnconditional() ||
3797 // Must be empty other than the branch.
3798 &Pred->front() != PredBr)
3801 // Finally, since we know that Dest has phi nodes in it, we have to make
3802 // sure that jumping to Pred will have the same effect as going to Dest in
3803 // terms of PHI values.
3806 bool FoundMatch = true;
3807 for (BasicBlock::iterator I = Dest->begin();
3808 (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
3809 if (PHINo == TIPHIValues.size())
3810 TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));
3812 // If the PHI entry doesn't work, we can't use this pred.
3813 if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
3819 // If we found a workable predecessor, change TI to branch to Pred instead of Dest.
3821 Dest->removePredecessor(TIBB);
3822 TI->setSuccessor(SuccNum, Pred);
3827 SplitCriticalEdge(TI, SuccNum, P, true);
3831 bool SelectionDAGISel::runOnFunction(Function &Fn) {
3832 MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
3833 RegMap = MF.getSSARegMap();
3834 DOUT << "\n\n\n=== " << Fn.getName() << "\n";
3836 // First, split all critical edges.
3838 // In this pass we also look for GEP and cast instructions that are used
3839 // across basic blocks and rewrite them to improve basic-block-at-a-time selection.
3842 bool MadeChange = true;
3843 while (MadeChange) {
3845 for (Function::iterator FNI = Fn.begin(), E = Fn.end(); FNI != E; ++FNI) {
3846 // Split all critical edges where the dest block has a PHI.
3847 TerminatorInst *BBTI = FNI->getTerminator();
3848 if (BBTI->getNumSuccessors() > 1) {
3849 for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i)
3850 if (isa<PHINode>(BBTI->getSuccessor(i)->begin()) &&
3851 isCriticalEdge(BBTI, i, true))
3852 SplitEdgeNicely(BBTI, i, this);
3856 for (BasicBlock::iterator BBI = FNI->begin(), E = FNI->end(); BBI != E; ) {
3857 Instruction *I = BBI++;
3859 if (CallInst *CI = dyn_cast<CallInst>(I)) {
3860 // If we found an inline asm expression, and if the target knows how to
3861 // lower it to normal LLVM code, do so now.
3862 if (isa<InlineAsm>(CI->getCalledValue()))
3863 if (const TargetAsmInfo *TAI =
3864 TLI.getTargetMachine().getTargetAsmInfo()) {
3865 if (TAI->ExpandInlineAsm(CI))
3868 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
3869 MadeChange |= OptimizeGEPExpression(GEPI, TLI.getTargetData());
3870 } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
3871 // If the source of the cast is a constant, then this should have
3872 // already been constant folded. The only reason NOT to constant fold
3873 // it is if something (e.g. LSR) was careful to place the constant
3874 // evaluation in a block other than the one that uses it (e.g. to hoist
3875 // the address of globals out of a loop). If this is the case, we don't
3876 // want to forward-subst the cast.
3877 if (isa<Constant>(CI->getOperand(0)))
3878 continue;
3880 // If this is a noop copy, sink it into user blocks to reduce the number
3881 // of virtual registers that must be created and coalesced.
3882 MVT::ValueType SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
3883 MVT::ValueType DstVT = TLI.getValueType(CI->getType());
3885 // If this is an fp<->int conversion, it is not a noop copy.
3886 if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT))
3887 continue;
3889 // If this is an extension, it will be a zero or sign extension, which isn't a noop.
3891 if (SrcVT < DstVT) continue;
3893 // If these values will be promoted, find out what they will be promoted
3894 // to. This helps us consider truncates on PPC as noop copies when they are.
3896 if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
3897 SrcVT = TLI.getTypeToTransformTo(SrcVT);
3898 if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
3899 DstVT = TLI.getTypeToTransformTo(DstVT);
3901 // If, after promotion, these are the same types, this is a noop copy.
3902 if (SrcVT == DstVT)
3903 MadeChange |= OptimizeNoopCopyExpression(CI);
3909 FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
3911 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
3912 SelectBasicBlock(I, MF, FuncInfo);
3914 // Add function live-ins to entry block live-in set.
3915 BasicBlock *EntryBB = &Fn.getEntryBlock();
3916 BB = FuncInfo.MBBMap[EntryBB];
3917 if (!MF.livein_empty())
3918 for (MachineFunction::livein_iterator I = MF.livein_begin(),
3919 E = MF.livein_end(); I != E; ++I)
3920 BB->addLiveIn(I->first);
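/// CopyValueToVirtualRegister - Emit the CopyToReg node (or nodes, for
/// promoted, expanded, or vector values) needed to make the value V available
/// in virtual register Reg, returning a chain that depends on all of the
/// copies emitted.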
3925 SDOperand SelectionDAGLowering::CopyValueToVirtualRegister(Value *V,
3927 SDOperand Op = getValue(V);
3928 assert((Op.getOpcode() != ISD::CopyFromReg ||
3929 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
3930 "Copy from a reg to the same reg!");
3932 // If this type is not legal, we must make sure to not create an invalid register use.
3934 MVT::ValueType SrcVT = Op.getValueType();
3935 MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
3936 if (SrcVT == DestVT) {
3937 return DAG.getCopyToReg(getRoot(), Reg, Op);
3938 } else if (SrcVT == MVT::Vector) {
3939 // Handle copies from generic vectors to registers.
3940 MVT::ValueType PTyElementVT, PTyLegalElementVT;
3941 unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(V->getType()),
3942 PTyElementVT, PTyLegalElementVT);
3944 // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT"
3945 // MVT::Vector type.
3946 Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op,
3947 DAG.getConstant(NE, MVT::i32),
3948 DAG.getValueType(PTyElementVT));
3950 // Loop over all of the elements of the resultant vector,
3951 // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then
3952 // copying them into output registers.
3953 SmallVector<SDOperand, 8> OutChains;
3954 SDOperand Root = getRoot();
3955 for (unsigned i = 0; i != NE; ++i) {
3956 SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT,
3957 Op, DAG.getConstant(i, TLI.getPointerTy()));
3958 if (PTyElementVT == PTyLegalElementVT) {
3959 // Elements are legal.
3960 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
3961 } else if (PTyLegalElementVT > PTyElementVT) {
3962 // Elements are promoted.
3963 if (MVT::isFloatingPoint(PTyLegalElementVT))
3964 Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt);
3966 Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt);
3967 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
3969 // Elements are expanded.
3970 // The src value is expanded into multiple registers.
3971 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
3972 Elt, DAG.getConstant(0, TLI.getPointerTy()));
3973 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
3974 Elt, DAG.getConstant(1, TLI.getPointerTy()));
3975 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo));
3976 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi));
3979 return DAG.getNode(ISD::TokenFactor, MVT::Other,
3980 &OutChains[0], OutChains.size());
3981 } else if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote) {
3982 // The src value is promoted to the register.
3983 if (MVT::isFloatingPoint(SrcVT))
3984 Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
3986 Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
3987 return DAG.getCopyToReg(getRoot(), Reg, Op);
3989 DestVT = TLI.getTypeToExpandTo(SrcVT);
3990 unsigned NumVals = TLI.getNumElements(SrcVT);
3991 if (NumVals == 1)
3992 return DAG.getCopyToReg(getRoot(), Reg,
3993 DAG.getNode(ISD::BIT_CONVERT, DestVT, Op));
3994 assert(NumVals == 2 && "1 to 4 (and more) expansion not implemented!");
3995 // The src value is expanded into multiple registers.
3996 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
3997 Op, DAG.getConstant(0, TLI.getPointerTy()));
3998 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
3999 Op, DAG.getConstant(1, TLI.getPointerTy()));
4000 Op = DAG.getCopyToReg(getRoot(), Reg, Lo);
4001 return DAG.getCopyToReg(Op, Reg+1, Hi);
4005 void SelectionDAGISel::
4006 LowerArguments(BasicBlock *LLVMBB, SelectionDAGLowering &SDL,
4007 std::vector<SDOperand> &UnorderedChains) {
4008 // If this is the entry block, emit arguments.
4009 Function &F = *LLVMBB->getParent();
4010 FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
4011 SDOperand OldRoot = SDL.DAG.getRoot();
4012 std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
4015 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
4017 if (!AI->use_empty()) {
4018 SDL.setValue(AI, Args[a]);
4020 // If this argument is live outside of the entry block, insert a copy from
4021 // wherever we got it to the vreg that other BBs will reference it as.
4022 if (FuncInfo.ValueMap.count(AI)) {
4023 SDOperand Copy =
4024 SDL.CopyValueToVirtualRegister(AI, FuncInfo.ValueMap[AI]);
4025 UnorderedChains.push_back(Copy);
4029 // Finally, if the target has anything special to do, allow it to do so.
4030 // FIXME: this should insert code into the DAG!
4031 EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction());
4034 void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
4035 std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
4036 FunctionLoweringInfo &FuncInfo) {
4037 SelectionDAGLowering SDL(DAG, TLI, FuncInfo);
4039 std::vector<SDOperand> UnorderedChains;
4041 // Lower any arguments needed in this block if this is the entry block.
4042 if (LLVMBB == &LLVMBB->getParent()->front())
4043 LowerArguments(LLVMBB, SDL, UnorderedChains);
4045 BB = FuncInfo.MBBMap[LLVMBB];
4046 SDL.setCurrentBasicBlock(BB);
4048 // Lower all of the non-terminator instructions.
4049 for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
4053 // Ensure that all instructions which are used outside of their defining
4054 // blocks are available as virtual registers.
4055 for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
4056 if (!I->use_empty() && !isa<PHINode>(I)) {
4057 DenseMap<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I);
4058 if (VMI != FuncInfo.ValueMap.end())
4059 UnorderedChains.push_back(
4060 SDL.CopyValueToVirtualRegister(I, VMI->second));
4063 // Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
4064 // ensure constants are generated when needed. Remember the virtual registers
4065 // that need to be added to the Machine PHI nodes as input. We cannot just
4066 // directly add them, because expansion might result in multiple MBBs for one
4067 // BB. As such, the start of the BB might correspond to a different MBB than the end.
4070 TerminatorInst *TI = LLVMBB->getTerminator();
4072 // Emit constants only once even if used by multiple PHI nodes.
4073 std::map<Constant*, unsigned> ConstantsOut;
4075 // Vector bool would be better, but vector<bool> is really slow.
4076 std::vector<unsigned char> SuccsHandled;
4077 if (TI->getNumSuccessors())
4078 SuccsHandled.resize(BB->getParent()->getNumBlockIDs());
4080 // Check successor blocks' PHI nodes that expect a constant to be available from this block.
4082 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
4083 BasicBlock *SuccBB = TI->getSuccessor(succ);
4084 if (!isa<PHINode>(SuccBB->begin())) continue;
4085 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
4087 // If this terminator has multiple identical successors (common for
4088 // switches), only handle each succ once.
4089 unsigned SuccMBBNo = SuccMBB->getNumber();
4090 if (SuccsHandled[SuccMBBNo]) continue;
4091 SuccsHandled[SuccMBBNo] = true;
4093 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
4096 // At this point we know that there is a 1-1 correspondence between LLVM PHI
4097 // nodes and Machine PHI nodes, but the incoming operands have not been emitted yet.
4099 for (BasicBlock::iterator I = SuccBB->begin();
4100 (PN = dyn_cast<PHINode>(I)); ++I) {
4101 // Ignore dead phi's.
4102 if (PN->use_empty()) continue;
4105 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
4107 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
4108 unsigned &RegOut = ConstantsOut[C];
4110 RegOut = FuncInfo.CreateRegForValue(C);
4111 UnorderedChains.push_back(
4112 SDL.CopyValueToVirtualRegister(C, RegOut));
4116 Reg = FuncInfo.ValueMap[PHIOp];
4118 assert(isa<AllocaInst>(PHIOp) &&
4119 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
4120 "Didn't codegen value into a register!??");
4121 Reg = FuncInfo.CreateRegForValue(PHIOp);
4122 UnorderedChains.push_back(
4123 SDL.CopyValueToVirtualRegister(PHIOp, Reg));
4127 // Remember that this register needs to be added to the machine PHI node as
4128 // the input for this MBB.
4129 MVT::ValueType VT = TLI.getValueType(PN->getType());
4130 unsigned NumElements;
4131 if (VT != MVT::Vector)
4132 NumElements = TLI.getNumElements(VT);
4134 MVT::ValueType VT1,VT2;
4136 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
4139 for (unsigned i = 0, e = NumElements; i != e; ++i)
4140 PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
4143 ConstantsOut.clear();
4145 // Turn all of the unordered chains into one factored node.
4146 if (!UnorderedChains.empty()) {
4147 SDOperand Root = SDL.getRoot();
4148 if (Root.getOpcode() != ISD::EntryToken) {
4149 unsigned i = 0, e = UnorderedChains.size();
4150 for (; i != e; ++i) {
4151 assert(UnorderedChains[i].Val->getNumOperands() > 1);
4152 if (UnorderedChains[i].Val->getOperand(0) == Root)
4153 break; // Don't add the root if we already indirectly depend on it.
4157 UnorderedChains.push_back(Root);
4159 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other,
4160 &UnorderedChains[0], UnorderedChains.size()));
4163 // Lower the terminator after the copies are emitted.
4164 SDL.visit(*LLVMBB->getTerminator());
4166 // Copy over any CaseBlock records that may now exist due to SwitchInst
4167 // lowering, as well as any jump table information.
4168 SwitchCases.clear();
4169 SwitchCases = SDL.SwitchCases;
4172 // Make sure the root of the DAG is up-to-date.
4173 DAG.setRoot(SDL.getRoot());
4176 void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) {
4177 // Get alias analysis for load/store combining.
4178 AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
4180 // Run the DAG combiner in pre-legalize mode.
4181 DAG.Combine(false, AA);
4183 DOUT << "Lowered selection DAG:\n";
4186 // Second step, hack on the DAG until it only uses operations and types that
4187 // the target supports.
4190 DOUT << "Legalized selection DAG:\n";
4193 // Run the DAG combiner in post-legalize mode.
4194 DAG.Combine(true, AA);
4196 if (ViewISelDAGs) DAG.viewGraph();
4198 // Third, instruction select all of the operations to machine code, adding the
4199 // code to the MachineBasicBlock.
4200 InstructionSelectBasicBlock(DAG);
4202 DOUT << "Selected machine code:\n";
4206 void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
4207 FunctionLoweringInfo &FuncInfo) {
4208 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
4210 SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
4213 // First step, lower LLVM code to some DAG. This DAG may use operations and
4214 // types that are not supported by the target.
4215 BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
4217 // Second step, emit the lowered DAG as machine code.
4218 CodeGenAndEmitDAG(DAG);
4221 // Next, now that we know which MBB is the last one the LLVM BB expanded into,
4222 // update PHI nodes in successors.
4223 if (SwitchCases.empty() && JT.Reg == 0) {
4224 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
4225 MachineInstr *PHI = PHINodesToUpdate[i].first;
4226 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
4227 "This is not a machine PHI node that we are updating!");
4228 PHI->addRegOperand(PHINodesToUpdate[i].second, false);
4229 PHI->addMachineBasicBlockOperand(BB);
4234 // If the JumpTable record is filled in, then we need to emit a jump table.
4235 // Updating the PHI nodes is tricky in this case, since we need to determine
4236 // whether the PHI is a successor of the range check MBB or the jump table MBB
4238 assert(SwitchCases.empty() && "Cannot have jump table and lowered switch");
4239 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
4241 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
4242 MachineBasicBlock *RangeBB = BB;
4243 // Set the current basic block to the mbb we wish to insert the code into
4245 SDL.setCurrentBasicBlock(BB);
4247 SDL.visitJumpTable(JT);
4248 SDAG.setRoot(SDL.getRoot());
4249 CodeGenAndEmitDAG(SDAG);
4251 for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
4252 MachineInstr *PHI = PHINodesToUpdate[pi].first;
4253 MachineBasicBlock *PHIBB = PHI->getParent();
4254 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
4255 "This is not a machine PHI node that we are updating!");
4256 if (PHIBB == JT.Default) {
4257 PHI->addRegOperand(PHINodesToUpdate[pi].second, false);
4258 PHI->addMachineBasicBlockOperand(RangeBB);
4260 if (BB->succ_end() != std::find(BB->succ_begin(),BB->succ_end(), PHIBB)) {
4261 PHI->addRegOperand(PHINodesToUpdate[pi].second, false);
4262 PHI->addMachineBasicBlockOperand(BB);
4268 // If the switch block involved a branch to one of the actual successors, we
4269 // need to update PHI nodes in that block.
4270 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
4271 MachineInstr *PHI = PHINodesToUpdate[i].first;
4272 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
4273 "This is not a machine PHI node that we are updating!");
4274 if (BB->isSuccessor(PHI->getParent())) {
4275 PHI->addRegOperand(PHINodesToUpdate[i].second, false);
4276 PHI->addMachineBasicBlockOperand(BB);
4280 // If we generated any switch lowering information, build and codegen any
4281 // additional DAGs necessary.
4282 for (unsigned i = 0, e = SwitchCases.size(); i != e; ++i) {
4283 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
4285 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
4287 // Set the current basic block to the mbb we wish to insert the code into
4288 BB = SwitchCases[i].ThisBB;
4289 SDL.setCurrentBasicBlock(BB);
4292 SDL.visitSwitchCase(SwitchCases[i]);
4293 SDAG.setRoot(SDL.getRoot());
4294 CodeGenAndEmitDAG(SDAG);
4296 // Handle any PHI nodes in successors of this chunk, as if we were coming
4297 // from the original BB before switch expansion. Note that PHI nodes can
4298 // occur multiple times in PHINodesToUpdate. We have to be very careful to
4299 // handle them the right number of times.
4300 while ((BB = SwitchCases[i].TrueBB)) { // Handle LHS and RHS.
4301 for (MachineBasicBlock::iterator Phi = BB->begin();
4302 Phi != BB->end() && Phi->getOpcode() == TargetInstrInfo::PHI; ++Phi){
4303 // This value for this PHI node is recorded in PHINodesToUpdate, get it.
4304 for (unsigned pn = 0; ; ++pn) {
4305 assert(pn != PHINodesToUpdate.size() && "Didn't find PHI entry!");
4306 if (PHINodesToUpdate[pn].first == Phi) {
4307 Phi->addRegOperand(PHINodesToUpdate[pn].second, false);
4308 Phi->addMachineBasicBlockOperand(SwitchCases[i].ThisBB);
4314 // Don't process RHS if same block as LHS.
4315 if (BB == SwitchCases[i].FalseBB)
4316 SwitchCases[i].FalseBB = 0;
4318 // If we haven't handled the RHS, do so now. Otherwise, we're done.
4319 SwitchCases[i].TrueBB = SwitchCases[i].FalseBB;
4320 SwitchCases[i].FalseBB = 0;
4322 assert(SwitchCases[i].TrueBB == 0 && SwitchCases[i].FalseBB == 0);
4327 //===----------------------------------------------------------------------===//
4328 /// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
4329 /// target node in the graph.
4330 void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) {
4331 if (ViewSchedDAGs) DAG.viewGraph();
4333 RegisterScheduler::FunctionPassCtor Ctor = RegisterScheduler::getDefault();
4337 RegisterScheduler::setDefault(Ctor);
4340 ScheduleDAG *SL = Ctor(this, &DAG, BB);
4346 HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() {
4347 return new HazardRecognizer();
4350 //===----------------------------------------------------------------------===//
4351 // Helper functions used by the generated instruction selector.
4352 //===----------------------------------------------------------------------===//
4353 // Calls to these methods are generated by tblgen.
4355 /// CheckAndMask - The isel is trying to match something like (and X, 255). If
4356 /// the dag combiner simplified the 255, we still want to match. RHS is the
4357 /// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
4358 /// specified in the .td file (e.g. 255).
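/// For example, if the pattern wants (and X, 255) but the combiner has already
/// turned it into (and X, 254) because bit 0 of X is known to be zero, this
/// still reports a match.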
4359 bool SelectionDAGISel::CheckAndMask(SDOperand LHS, ConstantSDNode *RHS,
4360 int64_t DesiredMaskS) {
4361 uint64_t ActualMask = RHS->getValue();
4362 uint64_t DesiredMask = DesiredMaskS & MVT::getIntVTBitMask(LHS.getValueType());
4364 // If the actual mask exactly matches, success!
4365 if (ActualMask == DesiredMask)
4368 // If the actual AND mask keeps bits that the desired mask would clear, this doesn't match.
4369 if (ActualMask & ~DesiredMask)
4372 // Otherwise, the DAG Combiner may have proven that the value coming in is
4373 // either already zero or is not demanded. Check for known zero input bits.
4374 uint64_t NeededMask = DesiredMask & ~ActualMask;
4375 if (getTargetLowering().MaskedValueIsZero(LHS, NeededMask))
4378 // TODO: check to see if missing bits are just not demanded.
4380 // Otherwise, this pattern doesn't match.
4384 /// CheckOrMask - The isel is trying to match something like (or X, 255). If
4385 /// the dag combiner simplified the 255, we still want to match. RHS is the
4386 /// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
4387 /// specified in the .td file (e.g. 255).
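/// For example, if the pattern wants (or X, 255) but the combiner has already
/// turned it into (or X, 127) because bit 7 of X is known to be one, this
/// still reports a match.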
4388 bool SelectionDAGISel::CheckOrMask(SDOperand LHS, ConstantSDNode *RHS,
4389 int64_t DesiredMaskS) {
4390 uint64_t ActualMask = RHS->getValue();
4391 uint64_t DesiredMask = DesiredMaskS & MVT::getIntVTBitMask(LHS.getValueType());
4393 // If the actual mask exactly matches, success!
4394 if (ActualMask == DesiredMask)
4398 // If the actual OR mask sets bits that the pattern does not allow, this doesn't match.
4398 if (ActualMask & ~DesiredMask)
4401 // Otherwise, the DAG Combiner may have proven that the value coming in is
4402 // either already zero or is not demanded. Check for known zero input bits.
4403 uint64_t NeededMask = DesiredMask & ~ActualMask;
4405 uint64_t KnownZero, KnownOne;
4406 getTargetLowering().ComputeMaskedBits(LHS, NeededMask, KnownZero, KnownOne);
4408 // If all the missing bits in the or are already known to be set, match!
4409 if ((NeededMask & KnownOne) == NeededMask)
4412 // TODO: check to see if missing bits are just not demanded.
4414 // Otherwise, this pattern doesn't match.
4419 /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
4420 /// by tblgen. Others should not call it.
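/// Each operand group of the INLINEASM node starts with a flag word: the low
/// three bits give the operand kind (4 meaning memory) and the remaining bits
/// give the number of SDOperands in the group. Memory operands are re-selected
/// here into whatever addressing-mode operands the target chooses.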
4421 void SelectionDAGISel::
4422 SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) {
4423 std::vector<SDOperand> InOps;
4424 std::swap(InOps, Ops);
4426 Ops.push_back(InOps[0]); // input chain.
4427 Ops.push_back(InOps[1]); // input asm string.
4429 unsigned i = 2, e = InOps.size();
4430 if (InOps[e-1].getValueType() == MVT::Flag)
4431 --e; // Don't process a flag operand if it is here.
4434 unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue();
4435 if ((Flags & 7) != 4 /*MEM*/) {
4436 // Just skip over this operand, copying the operands verbatim.
4437 Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1);
4438 i += (Flags >> 3) + 1;
4440 assert((Flags >> 3) == 1 && "Memory operand with multiple values?");
4441 // Otherwise, this is a memory operand. Ask the target to select it.
4442 std::vector<SDOperand> SelOps;
4443 if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) {
4444 cerr << "Could not match memory address. Inline asm failure!\n";
4448 // Add this to the output node.
4449 Ops.push_back(DAG.getTargetConstant(4/*MEM*/ | (SelOps.size() << 3),
4451 Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
4456 // Add the flag input back if present.
4457 if (e != InOps.size())
4458 Ops.push_back(InOps.back());