//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized DAG to an X86 DAG.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Streams.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumFPKill   , "Number of FP_REG_KILL instructions added");
STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//
/// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
/// SDValues instead of register numbers for the leaves of the matched
/// tree.
struct X86ISelAddressMode {
  enum {
    RegBase,
    FrameIndexBase
  } BaseType;

  struct {            // This is really a union, discriminated by BaseType!
    SDValue Reg;
    int FrameIndex;
  } Base;

  bool isRIPRel;      // RIP as base?
  unsigned Scale;
  SDValue IndexReg;
  unsigned Disp;
  GlobalValue *GV;
  Constant *CP;
  const char *ES;
  int JT;
  unsigned Align;     // CP alignment.

  X86ISelAddressMode()
    : BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
      GV(0), CP(0), ES(0), JT(-1), Align(0) {
  }
  void dump() {
    cerr << "X86ISelAddressMode " << this << "\n";
    cerr << "Base.Reg ";
    if (Base.Reg.Val != 0) Base.Reg.Val->dump();
    else cerr << "nul";
    cerr << " Base.FrameIndex " << Base.FrameIndex << "\n";
    cerr << "isRIPRel " << isRIPRel << " Scale " << Scale << "\n";
    cerr << "IndexReg ";
    if (IndexReg.Val != 0) IndexReg.Val->dump();
    else cerr << "nul";
    cerr << " Disp " << Disp << "\n";
    cerr << "GV ";
    if (GV) GV->dump();
    else cerr << "nul";
    cerr << " CP ";
    if (CP) CP->dump();
    else cerr << "nul";
    cerr << "ES ";
    if (ES) cerr << ES;
    else cerr << "nul";
    cerr << " JT " << JT << " Align " << Align << "\n";
  }
};
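// For reference, the fields above describe the general x86 memory operand
// form Base + Index*Scale + Disp (with GV/CP/ES/JT as symbolic
// displacements); e.g. "movl 4(%eax,%ebx,2), %ecx" corresponds to
// Base.Reg = %eax, IndexReg = %ebx, Scale = 2, Disp = 4.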
//===--------------------------------------------------------------------===//
/// ISel - X86-specific code to select X86 machine instructions for
/// SelectionDAG operations.
class VISIBILITY_HIDDEN X86DAGToDAGISel : public SelectionDAGISel {
  /// ContainsFPCode - Every instruction we select that uses or defines a FP
  /// register should set this to true.
  bool ContainsFPCode;

  /// TM - Keep a reference to X86TargetMachine.
  X86TargetMachine &TM;

  /// X86Lowering - This object fully describes how to lower LLVM code to an
  /// X86-specific SelectionDAG.
  X86TargetLowering X86Lowering;

  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// GlobalBaseReg - Keeps track of the virtual register mapped onto the
  /// global base register.
  unsigned GlobalBaseReg;

  /// CurBB - Current BB being isel'd.
  MachineBasicBlock *CurBB;
public:
  X86DAGToDAGISel(X86TargetMachine &tm, bool fast)
    : SelectionDAGISel(X86Lowering, fast),
      ContainsFPCode(false), TM(tm),
      X86Lowering(*TM.getTargetLowering()),
      Subtarget(&TM.getSubtarget<X86Subtarget>()) {}

  virtual bool runOnFunction(Function &Fn) {
    // Make sure we re-emit a set of the global base reg if necessary.
    GlobalBaseReg = 0;
    return SelectionDAGISel::runOnFunction(Fn);
  }

  virtual const char *getPassName() const {
    return "X86 DAG->DAG Instruction Selection";
  }

  /// InstructionSelect - This callback is invoked by
  /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
  virtual void InstructionSelect(SelectionDAG &DAG);

  /// InstructionSelectPostProcessing - Post processing of selected and
  /// scheduled basic blocks.
  virtual void InstructionSelectPostProcessing();

  virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);

  virtual bool CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const;

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"
private:
  SDNode *Select(SDValue N);

  bool MatchAddress(SDValue N, X86ISelAddressMode &AM,
                    bool isRoot = true, unsigned Depth = 0);
  bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM,
                        bool isRoot, unsigned Depth);
  bool SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                  SDValue &Scale, SDValue &Index, SDValue &Disp);
  bool SelectLEAAddr(SDValue Op, SDValue N, SDValue &Base,
                     SDValue &Scale, SDValue &Index, SDValue &Disp);
  bool SelectScalarSSELoad(SDValue Op, SDValue Pred,
                           SDValue N, SDValue &Base, SDValue &Scale,
                           SDValue &Index, SDValue &Disp,
                           SDValue &InChain, SDValue &OutChain);
  bool TryFoldLoad(SDValue P, SDValue N,
                   SDValue &Base, SDValue &Scale,
                   SDValue &Index, SDValue &Disp);
  void PreprocessForRMW(SelectionDAG &DAG);
  void PreprocessForFPConvert(SelectionDAG &DAG);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps,
                                            SelectionDAG &DAG);

  void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
  inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp) {
    Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
      CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
      AM.Base.Reg;
    Scale = getI8Imm(AM.Scale);
    Index = AM.IndexReg;
    // These are 32-bit even in 64-bit mode since RIP-relative offsets
    // are only 32 bits wide.
    if (AM.GV)
      Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp);
    else if (AM.CP)
      Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Align, AM.Disp);
    else if (AM.ES)
      Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32);
    else if (AM.JT != -1)
      Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32);
    else
      Disp = getI32Imm(AM.Disp);
  }
  /// getI8Imm - Return a target constant with the specified value, of type
  /// i8.
  inline SDValue getI8Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i8);
  }

  /// getI16Imm - Return a target constant with the specified value, of type
  /// i16.
  inline SDValue getI16Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i16);
  }

  /// getI32Imm - Return a target constant with the specified value, of type
  /// i32.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }
  /// getGlobalBaseReg - Insert code into the entry mbb to materialize the PIC
  /// base register. Return the virtual register that holds this value.
  SDNode *getGlobalBaseReg();

  /// getTruncate - Return an SDNode that implements a subreg-based truncate
  /// of the specified operand to the specified value type.
  SDNode *getTruncate(SDValue N0, MVT VT);
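  // For example (illustrative), truncating i32 -> i8 extracts the low 8-bit
  // subregister (%eax -> %al); on 32-bit targets the source is first copied
  // into a register class known to have an 8-bit subreg (see getTruncate's
  // definition below).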
/// findFlagUse - Return the use of the MVT::Flag value produced by the
/// specified SDNode.
static SDNode *findFlagUse(SDNode *N) {
  unsigned FlagResNo = N->getNumValues()-1;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end();
       I != E; ++I) {
    SDNode *User = *I;
    for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
      SDValue Op = User->getOperand(i);
      if (Op.Val == N && Op.ResNo == FlagResNo)
        return User;
    }
  }
  return NULL;
}
/// findNonImmUse - Return true by reference in "found" if "Use" is a
/// non-immediate use of "Def". This function recursively traverses up the
/// operand chain, ignoring certain nodes.
static void findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
                          SDNode *Root, SDNode *Skip, bool &found,
                          SmallPtrSet<SDNode*, 16> &Visited) {
  if (found ||
      Use->getNodeId() > Def->getNodeId() ||
      !Visited.insert(Use))
    return;

  for (unsigned i = 0, e = Use->getNumOperands(); !found && i != e; ++i) {
    SDNode *N = Use->getOperand(i).Val;
    if (N == Skip)
      continue;
    if (N == Def) {
      if (Use == ImmedUse || Use == Root)
        continue;  // We are not looking for immediate uses.

      // Must be a chain-reading node where it is possible to reach its own
      // chain operand through a path started from another operand.
      assert(Use->getOpcode() == ISD::STORE ||
             Use->getOpcode() == X86ISD::CMP ||
             Use->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
             Use->getOpcode() == ISD::INTRINSIC_VOID);
      found = true;
      break;
    }

    // Traverse up the operand chain.
    findNonImmUse(N, Def, ImmedUse, Root, Skip, found, Visited);
  }
}
/// isNonImmUse - Start searching from Root up the DAG to check if Def can
/// be reached. Return true if that's the case. However, ignore direct uses
/// by ImmedUse (which would be U in the example illustrated in
/// CanBeFoldedBy) and by Root (which can happen in the store case).
/// FIXME: to be really generic, we should allow direct use by any node
/// that is being folded. But realistically, since we only fold loads which
/// have one non-chain use, we only need to watch out for the load/op/store
/// and load/op/cmp cases, where the root (store / cmp) may reach the load
/// via its chain operand.
static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
                               SDNode *Skip = NULL) {
  SmallPtrSet<SDNode*, 16> Visited;
  bool found = false;
  findNonImmUse(Root, Def, ImmedUse, Root, Skip, found, Visited);
  return found;
}
bool X86DAGToDAGISel::CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const {
  if (FastISel) return false;

  // If U can somehow reach N through another path, then U can't fold N, or
  // it will create a cycle. e.g. In the following diagram, U can reach N
  // through X. If N is folded into U, then X becomes both a predecessor and
  // a successor of U.
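  //
  //   Illustrative sketch: U uses both X and N, and X in turn uses N:
  //
  //            [N]
  //           ^   ^
  //           |   |
  //          [X]  |
  //           ^   |
  //           |   |
  //          [U]--+
  //
  //   After folding N into U, X would have to be scheduled both before U
  //   (U uses X) and after U (X uses the folded N), i.e. a cycle.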
  if (isNonImmUse(Root, N, U))
    return false;

  // If U produces a flag, then it gets (even more) interesting. Since it
  // would have been "glued" together with its flag use, we need to check if
  // that flag use can also reach N.
  //
  // If FU (the flag use) indirectly reaches N (the load), and U folds N
  // (call it NU), then the TokenFactor TF between them is a predecessor of
  // FU and a successor of NU. But since NU and FU are flagged together,
  // this effectively creates a cycle.
  bool HasFlagUse = false;
  MVT VT = Root->getValueType(Root->getNumValues()-1);
  while (VT == MVT::Flag && !Root->use_empty()) {
    SDNode *FU = findFlagUse(Root);
    if (FU == NULL)
      break;
    Root = FU;
    HasFlagUse = true;
    VT = Root->getValueType(Root->getNumValues()-1);
  }

  if (HasFlagUse)
    return !isNonImmUse(Root, N, Root, U);
  return true;
}
/// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain
/// operand and move load below the TokenFactor. Replace store's chain operand
/// with load's chain result.
static void MoveBelowTokenFactor(SelectionDAG &DAG, SDValue Load,
                                 SDValue Store, SDValue TF) {
  std::vector<SDValue> Ops;
  for (unsigned i = 0, e = TF.Val->getNumOperands(); i != e; ++i)
    if (Load.Val == TF.Val->getOperand(i).Val)
      Ops.push_back(Load.Val->getOperand(0));
    else
      Ops.push_back(TF.Val->getOperand(i));
  DAG.UpdateNodeOperands(TF, &Ops[0], Ops.size());
  DAG.UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
  DAG.UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
                         Store.getOperand(2), Store.getOperand(3));
}
/// isRMWLoad - Return true if N is a load that's part of an RMW sub-DAG.
static bool isRMWLoad(SDValue N, SDValue Chain, SDValue Address,
                      SDValue &Load) {
  if (N.getOpcode() == ISD::BIT_CONVERT)
    N = N.getOperand(0);

  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD || LD->isVolatile())
    return false;
  if (LD->getAddressingMode() != ISD::UNINDEXED)
    return false;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  if (ExtType != ISD::NON_EXTLOAD && ExtType != ISD::EXTLOAD)
    return false;

  if (N.hasOneUse() &&
      N.getOperand(1) == Address &&
      N.Val->isOperandOf(Chain.Val)) {
    Load = N;
    return true;
  }
  return false;
}
/// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
/// This is only run if not in -fast mode (aka -O0).
/// This allows the instruction selector to pick more read-modify-write
/// instructions. This is a common case:
///
///     [Load chain]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///      /      \-
///     /         |
/// [TokenFactor] [Op]
///     ^          ^
///     |          |
///      \        /
///       \      /
///       [Store]
///
/// The fact that the store's chain operand differs from the load's chain
/// will prevent the (store (op (load))) instruction from being selected.
/// We can transform the DAG by moving the load below the TokenFactor: the
/// load then takes the TokenFactor as its chain, and the store's chain
/// operand becomes the load's chain result.
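///
/// For example (illustrative): for a store of the form
///   (store (add (load [Mem]), 1), [Mem])
/// whose chain operand is a TokenFactor, this rewrite lets isel later select
/// a single read-modify-write instruction such as "addl $1, (%eax)" instead
/// of separate load, add, and store instructions.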
void X86DAGToDAGISel::PreprocessForRMW(SelectionDAG &DAG) {
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
         E = DAG.allnodes_end(); I != E; ++I) {
    if (!ISD::isNON_TRUNCStore(I))
      continue;
    SDValue Chain = I->getOperand(0);
    if (Chain.Val->getOpcode() != ISD::TokenFactor)
      continue;

    SDValue N1 = I->getOperand(1);
    SDValue N2 = I->getOperand(2);
    if ((N1.getValueType().isFloatingPoint() &&
         !N1.getValueType().isVector()) ||
        !N1.hasOneUse())
      continue;

    bool RModW = false;
    SDValue Load;
    unsigned Opcode = N1.Val->getOpcode();
    switch (Opcode) {
    case ISD::ADD:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::VECTOR_SHUFFLE: {
      SDValue N10 = N1.getOperand(0);
      SDValue N11 = N1.getOperand(1);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      if (!RModW)
        RModW = isRMWLoad(N11, Chain, N2, Load);
      break;
    }
    case ISD::SUB:
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
    case ISD::SUBC:
    case ISD::SUBE:
    case X86ISD::SHLD:
    case X86ISD::SHRD: {
      SDValue N10 = N1.getOperand(0);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      break;
    }
    }

    if (RModW) {
      MoveBelowTokenFactor(DAG, Load, SDValue(I, 0), Chain);
      ++NumLoadMoved;
    }
  }
}
/// PreprocessForFPConvert - Walk over the DAG, lowering FP_ROUND and
/// FP_EXTEND nodes that target the FP stack into a store and a load through
/// the stack. This is a gross hack. We would like to simply mark these as
/// being illegal, but when we do that, legalize produces these when it
/// expands calls, then expands these in the same legalize pass. We would
/// like dag combine to be able to hack on these between the call expansion
/// and the node legalization. As such, this pass basically does "really
/// late" legalization of these inline with instruction selection.
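///
/// For example (illustrative), an x87 f80 -> SSE f32 FP_ROUND becomes
/// roughly
///   (f32 extload (truncstore f32 (f80 x), StackTemp), StackTemp)
/// i.e. a truncating store to a fresh stack slot followed by a reload.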
void X86DAGToDAGISel::PreprocessForFPConvert(SelectionDAG &DAG) {
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
         E = DAG.allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    MVT SrcVT = N->getOperand(0).getValueType();
    MVT DstVT = N->getValueType(0);
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    MVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = DAG.CreateStackTemporary(MemVT);

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = DAG.getTruncStore(DAG.getEntryNode(), N->getOperand(0),
                                      MemTmp, NULL, 0, MemVT);
    SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, DstVT, Store, MemTmp,
                                    NULL, 0, MemVT);
    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    DAG.DeleteNode(N);
  }
}
/// InstructionSelect - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void X86DAGToDAGISel::InstructionSelect(SelectionDAG &DAG) {
  CurBB = BB;  // BB can change as result of isel.

  if (!FastISel)
    PreprocessForRMW(DAG);

  // FIXME: This should only happen when not -fast.
  PreprocessForFPConvert(DAG);

  // Codegen the basic block.
  DOUT << "===== Instruction selection begins:\n";
  DAG.setRoot(SelectRoot(DAG.getRoot()));
  DOUT << "===== Instruction selection ends:\n";

  DAG.RemoveDeadNodes();
}
void X86DAGToDAGISel::InstructionSelectPostProcessing() {
  // If we are emitting FP stack code, scan the basic block to determine if
  // this block defines any FP values. If so, put an FP_REG_KILL instruction
  // before the terminator of the block.
  //
  // Note that FP stack instructions are used in all modes for long double,
  // so we always need to do this check.
  // Also note that it's possible for an FP stack register to be live across
  // an instruction that produces multiple basic blocks (SSE CMOV) so we
  // must check all the generated basic blocks.
  //
  // Scan all of the machine instructions in these MBBs, checking for FP
  // stores. (RFP32 and RFP64 will not exist in SSE mode, but RFP80 might.)
  MachineFunction::iterator MBBI = CurBB;
  MachineFunction::iterator EndMBB = BB; ++EndMBB;
  for (; MBBI != EndMBB; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;

    // If this block returns, ignore it. We don't want to insert an
    // FP_REG_KILL before the return.
    MachineBasicBlock::iterator EndI = MBB->end();
    --EndI;
    if (EndI->getDesc().isReturn())
      continue;
    bool ContainsFPCode = false;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         !ContainsFPCode && I != E; ++I) {
      if (I->getNumOperands() != 0 && I->getOperand(0).isRegister()) {
        const TargetRegisterClass *clas;
        for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
          if (I->getOperand(op).isRegister() && I->getOperand(op).isDef() &&
              TargetRegisterInfo::isVirtualRegister(I->getOperand(op).getReg()) &&
              ((clas = RegInfo->getRegClass(I->getOperand(op).getReg())) ==
                 X86::RFP32RegisterClass ||
               clas == X86::RFP64RegisterClass ||
               clas == X86::RFP80RegisterClass)) {
            ContainsFPCode = true;
            break;
          }
        }
      }
    }
    // Check PHI nodes in successor blocks. These PHI's will be lowered to
    // have a copy of the input value in this block. In SSE mode, we only
    // care about 80-bit values.
    if (!ContainsFPCode) {
      // Final check, check LLVM BB's that are successors to the LLVM BB
      // corresponding to BB for FP PHI nodes.
      const BasicBlock *LLVMBB = BB->getBasicBlock();
      const PHINode *PN;
      for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
           !ContainsFPCode && SI != E; ++SI) {
        for (BasicBlock::const_iterator II = SI->begin();
             (PN = dyn_cast<PHINode>(II)); ++II) {
          if (PN->getType() == Type::X86_FP80Ty ||
              (!Subtarget->hasSSE1() && PN->getType()->isFloatingPoint()) ||
              (!Subtarget->hasSSE2() && PN->getType() == Type::DoubleTy)) {
            ContainsFPCode = true;
            break;
          }
        }
      }
    }

    // Finally, if we found any FP code, emit the FP_REG_KILL instruction.
    if (ContainsFPCode) {
      BuildMI(*MBB, MBBI->getFirstTerminator(),
              TM.getInstrInfo()->get(X86::FP_REG_KILL));
      ++NumFPKill;
    }
  }
}
/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing())
    BuildMI(BB, TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
}

void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
  // If this is main, emit special code for main.
  MachineBasicBlock *BB = MF.begin();
  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}
/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
                                   bool isRoot, unsigned Depth) {
  DOUT << "MatchAddress: "; DEBUG(AM.dump());
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM, isRoot, Depth);

  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRel) {
    if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
      int64_t Val = cast<ConstantSDNode>(N)->getSignExtended();
      if (isInt32(AM.Disp + Val)) {
        AM.Disp += Val;
        return false;
      }
    }
    return true;
  }
  int id = N.Val->getNodeId();
  bool AlreadySelected = isSelected(id);  // Already selected, not yet replaced.

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    int64_t Val = cast<ConstantSDNode>(N)->getSignExtended();
    if (isInt32(AM.Disp + Val)) {
      AM.Disp += Val;
      return false;
    }
    break;
  }

  case X86ISD::Wrapper: {
    DOUT << "Wrapper: 64bit " << Subtarget->is64Bit();
    DOUT << " AM "; DEBUG(AM.dump()); DOUT << "\n";
    DOUT << "AlreadySelected " << AlreadySelected << "\n";
    bool is64Bit = Subtarget->is64Bit();
    // Under X86-64 non-small code model, GV (and friends) are 64 bits wide.
    // Also, base and index reg must be 0 in order to use %rip as base.
    if (is64Bit && (TM.getCodeModel() != CodeModel::Small ||
                    AM.Base.Reg.Val || AM.IndexReg.Val))
      break;
    if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
      break;
    // If the value is already available in a register and both the base and
    // index components have been picked, we can't fit the result into the
    // addressing mode. Instead, duplicate the GlobalAddress or ConstantPool
    // as a displacement.
    if (!AlreadySelected || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
      SDValue N0 = N.getOperand(0);
      if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
        GlobalValue *GV = G->getGlobal();
        AM.GV = GV;
        AM.Disp += G->getOffset();
        AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
          Subtarget->isPICStyleRIPRel();
        return false;
      } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
        AM.CP = CP->getConstVal();
        AM.Align = CP->getAlignment();
        AM.Disp += CP->getOffset();
        AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
          Subtarget->isPICStyleRIPRel();
        return false;
      } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
        AM.ES = S->getSymbol();
        AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
          Subtarget->isPICStyleRIPRel();
        return false;
      } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
        AM.JT = J->getIndex();
        AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
          Subtarget->isPICStyleRIPRel();
        return false;
      }
    }
    break;
  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base.Reg.Val == 0) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AlreadySelected || AM.IndexReg.Val != 0 || AM.Scale != 1 || AM.isRIPRel)
      break;

    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
      unsigned Val = CN->getValue();
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.Val->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (ShVal.Val->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
            isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
          AM.IndexReg = ShVal.Val->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.Val->getOperand(1));
          uint64_t Disp = AM.Disp + (AddVal->getValue() << Val);
          if (isInt32(Disp))
            AM.Disp = Disp;
          else
            AM.IndexReg = ShVal;
        } else {
          AM.IndexReg = ShVal;
        }
        return false;
      }
    }
    break;
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.ResNo != 0) break;
    // FALL THROUGH
  case ISD::MUL:
    // X*[3,5,9] -> X+X*[2,4,8]
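    // e.g. x*5 can be emitted as "leal (%reg,%reg,4), %dst": base = x,
    // index = x, scale = 4, so x + x*4 == x*5 (illustrative).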
    if (!AlreadySelected &&
        AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base.Reg.Val == 0 &&
        AM.IndexReg.Val == 0 &&
        !AM.isRIPRel) {
      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
        if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
          AM.Scale = unsigned(CN->getValue())-1;

          SDValue MulVal = N.Val->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.Val->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
            Reg = MulVal.Val->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.Val->getOperand(1));
            uint64_t Disp = AM.Disp + AddVal->getValue() * CN->getValue();
            if (isInt32(Disp))
              AM.Disp = Disp;
            else
              Reg = N.Val->getOperand(0);
          } else {
            Reg = N.Val->getOperand(0);
          }

          AM.IndexReg = AM.Base.Reg = Reg;
          return false;
        }
    }
    break;
  case ISD::ADD:
    if (!AlreadySelected) {
      X86ISelAddressMode Backup = AM;
      if (!MatchAddress(N.Val->getOperand(0), AM, false, Depth+1) &&
          !MatchAddress(N.Val->getOperand(1), AM, false, Depth+1))
        return false;
      AM = Backup;
      if (!MatchAddress(N.Val->getOperand(1), AM, false, Depth+1) &&
          !MatchAddress(N.Val->getOperand(0), AM, false, Depth+1))
        return false;
      AM = Backup;
    }
    break;
  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (AlreadySelected) break;

    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      X86ISelAddressMode Backup = AM;
      // Start with the LHS as an addr mode.
      if (!MatchAddress(N.getOperand(0), AM, false) &&
          // The address could not have picked a GV address for the
          // displacement.
          AM.GV == NULL &&
          // On x86-64, the resultant disp must fit in 32-bits.
          isInt32(AM.Disp + CN->getSignExtended()) &&
          // Check to see if the LHS & C is zero.
          CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
        AM.Disp += CN->getValue();
        return false;
      }
      AM = Backup;
    }
    break;
  case ISD::AND: {
    // Handle "(x << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
    // allows us to fold the shift into this addressing mode.
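    // For example, (x << 2) & 0xFC is rewritten as (x & 0x3F) << 2, so the
    // shift can become the addressing-mode scale (Scale = 4) while the
    // narrower AND mask is applied to the index register.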
    if (AlreadySelected) break;
    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SHL) break;

    // Scale must not be used already.
    if (AM.IndexReg.Val != 0 || AM.Scale != 1) break;

    // Not when RIP is used as the base.
    if (AM.isRIPRel) break;

    ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
    if (!C1 || !C2) break;

    // Not likely to be profitable if either the AND or SHIFT node has more
    // than one use (unless all uses are for address computation). Besides,
    // the isel mechanism requires their node IDs to be reused.
    if (!N.hasOneUse() || !Shift.hasOneUse())
      break;

    // Verify that the shift amount is something we can fold.
    unsigned ShiftCst = C1->getValue();
    if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
      break;

    // Get the new AND mask; this folds to a constant.
    SDValue NewANDMask = CurDAG->getNode(ISD::SRL, N.getValueType(),
                                         SDValue(C2, 0), SDValue(C1, 0));
    SDValue NewAND = CurDAG->getNode(ISD::AND, N.getValueType(),
                                     Shift.getOperand(0), NewANDMask);
    NewANDMask.Val->setNodeId(Shift.Val->getNodeId());
    NewAND.Val->setNodeId(N.Val->getNodeId());

    AM.Scale = 1 << ShiftCst;
    AM.IndexReg = NewAND;
    return false;
  }
  }

  return MatchAddressBase(N, AM, isRoot, Depth);
/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM,
                                       bool isRoot, unsigned Depth) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.Val == 0 && !AM.isRIPRel) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base.Reg = N;
  return false;
}
/// SelectAddr - Returns true if it is able to pattern-match an addressing
/// mode. It returns the operands which make up the maximal addressing mode
/// it can match by reference.
bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp) {
  X86ISelAddressMode AM;
  if (MatchAddress(N, AM))
    return false;

  MVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base.Reg.Val)
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.Val)
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp);
  return true;
}
/// isZeroNode - Returns true if Elt is a constant zero or a floating-point
/// constant +0.0.
static inline bool isZeroNode(SDValue Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}
/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
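///
/// For example (illustrative), (v4f32 (scalar_to_vector (load addr))) can be
/// selected as a single "movss addr, %xmm0": the upper elements are undef in
/// the DAG and zeroed by movss, either of which is acceptable here.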
bool X86DAGToDAGISel::SelectScalarSSELoad(SDValue Op, SDValue Pred,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &InChain,
                                          SDValue &OutChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    InChain = N.getOperand(0).getValue(1);
    if (ISD::isNON_EXTLoad(InChain.Val) &&
        InChain.getValue(0).hasOneUse() &&
        N.hasOneUse() &&
        CanBeFoldedBy(N.Val, Pred.Val, Op.Val)) {
      LoadSDNode *LD = cast<LoadSDNode>(InChain);
      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
        return false;
      OutChain = LD->getChain();
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.Val->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).Val->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).Val) &&
      N.getOperand(0).getOperand(0).hasOneUse()) {
    // Okay, this is a zero-extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
      return false;
    OutChain = LD->getChain();
    InChain = SDValue(LD, 1);
    return true;
  }
  return false;
}
/// SelectLEAAddr - It calls SelectAddr and determines if the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue Op, SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp) {
  X86ISelAddressMode AM;
  if (MatchAddress(N, AM))
    return false;

  MVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base.Reg.Val)
      Complexity = 1;
    else
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.Val)
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or
  // with a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
    // For X86-64, we should always use lea to materialize RIP-relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base.Reg.Val || AM.IndexReg.Val))
    Complexity++;

  if (Complexity > 2) {
    getAddressOperands(AM, Base, Scale, Index, Disp);
    return true;
  }
  return false;
}
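// Note on the Complexity heuristic above (illustrative): an address such as
// "4(%eax,%ebx,2)" (base + scaled index + disp) scores well above the
// Complexity > 2 threshold and is worth an LEA, while a bare "(,%reg,2)"
// scores only 2 and is cheaper as an add or shift.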
bool X86DAGToDAGISel::TryFoldLoad(SDValue P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp) {
  if (ISD::isNON_EXTLoad(N.Val) &&
      N.hasOneUse() &&
      CanBeFoldedBy(N.Val, P.Val, P.Val))
    return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp);
  return false;
}
/// getGlobalBaseReg - Output the instructions required to put the
/// base address to use for accessing globals into a register.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  assert(!Subtarget->is64Bit() && "X86-64 PIC uses RIP relative addressing");
  if (!GlobalBaseReg) {
    // Insert the set of GlobalBaseReg into the first MBB of the function.
    MachineFunction *MF = BB->getParent();
    MachineBasicBlock &FirstMBB = MF->front();
    MachineBasicBlock::iterator MBBI = FirstMBB.begin();
    MachineRegisterInfo &RegInfo = MF->getRegInfo();
    unsigned PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);

    const TargetInstrInfo *TII = TM.getInstrInfo();
    // The operand of MovePCtoStack is completely ignored by the asm printer;
    // it's only used in JIT code emission as a displacement to pc.
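    // The materialized sequence is roughly (illustrative):
    //     call  .Lpc        ; MOVPC32r pushes the return address...
    //   .Lpc:
    //     popl  %PC         ; ...and pops it, leaving the pc in %PC
    //     addl  $_GLOBAL_OFFSET_TABLE_, %PC   ; GOT-style PIC only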
    BuildMI(FirstMBB, MBBI, TII->get(X86::MOVPC32r), PC).addImm(0);

    // If we're using vanilla 'GOT' PIC style, we should use relative
    // addressing not to pc, but to the _GLOBAL_OFFSET_TABLE_ external symbol.
    if (TM.getRelocationModel() == Reloc::PIC_ &&
        Subtarget->isPICStyleGOT()) {
      GlobalBaseReg = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
      BuildMI(FirstMBB, MBBI, TII->get(X86::ADD32ri), GlobalBaseReg)
        .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_");
    } else {
      GlobalBaseReg = PC;
    }
  }
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).Val;
}
static SDNode *FindCallStartFromCall(SDNode *Node) {
  if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
  assert(Node->getOperand(0).getValueType() == MVT::Other &&
         "Node doesn't have a token chain argument!");
  return FindCallStartFromCall(Node->getOperand(0).Val);
}
SDNode *X86DAGToDAGISel::getTruncate(SDValue N0, MVT VT) {
  SDValue SRIdx;
  switch (VT.getSimpleVT()) {
  default: assert(0 && "Unknown truncate!");
  case MVT::i8:
    SRIdx = CurDAG->getTargetConstant(1, MVT::i32);  // SubRegSet 1
    // Ensure that the source register has an 8-bit subreg on 32-bit targets.
    if (!Subtarget->is64Bit()) {
      unsigned Opc;
      MVT N0VT = N0.getValueType();
      switch (N0VT.getSimpleVT()) {
      default: assert(0 && "Unknown truncate!");
      case MVT::i16:
        Opc = X86::MOV16to16_;
        break;
      case MVT::i32:
        Opc = X86::MOV32to32_;
        break;
      }
      N0 = SDValue(CurDAG->getTargetNode(Opc, N0VT, MVT::Flag, N0), 0);
      return CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
                                   VT, N0, SRIdx, N0.getValue(1));
    }
    break;
  case MVT::i16:
    SRIdx = CurDAG->getTargetConstant(2, MVT::i32);  // SubRegSet 2
    break;
  case MVT::i32:
    SRIdx = CurDAG->getTargetConstant(3, MVT::i32);  // SubRegSet 3
    break;
  }
  return CurDAG->getTargetNode(X86::EXTRACT_SUBREG, VT, N0, SRIdx);
}
SDNode *X86DAGToDAGISel::Select(SDValue N) {
  SDNode *Node = N.Val;
  MVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();

#ifndef NDEBUG
  DOUT << std::string(Indent, ' ') << "Selecting: ";
  DEBUG(Node->dump(CurDAG));
  DOUT << "\n";
  Indent += 2;
#endif

  if (Node->isMachineOpcode()) {
#ifndef NDEBUG
    DOUT << std::string(Indent-2, ' ') << "== ";
    DEBUG(Node->dump(CurDAG));
    DOUT << "\n";
    Indent -= 2;
#endif
    return NULL;  // Already selected.
  }
  switch (Opcode) {
  default: break;
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case ISD::ADD: {
    // Turn ADD X, c to MOV32ri X+c. This cannot be done with tblgen'd
    // code and is matched first so as to prevent it from being turned
    // into LEA32r X+c.
    // In the 64-bit small code model, use LEA to take advantage of
    // RIP-relative addressing.
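    // For example (illustrative), (add (X86ISD::Wrapper @g), 4) becomes
    //   movl $g+4, %reg         on ia32, or
    //   leaq g+4(%rip), %reg    under the 64-bit small code model.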
    if (TM.getCodeModel() != CodeModel::Small)
      break;
    MVT PtrVT = TLI.getPointerTy();
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    if (N.Val->getValueType(0) == PtrVT &&
        N0.getOpcode() == X86ISD::Wrapper &&
        N1.getOpcode() == ISD::Constant) {
      unsigned Offset = (unsigned)cast<ConstantSDNode>(N1)->getValue();
      SDValue C(0, 0);
      // TODO: handle ExternalSymbolSDNode.
      if (GlobalAddressSDNode *G =
            dyn_cast<GlobalAddressSDNode>(N0.getOperand(0))) {
        C = CurDAG->getTargetGlobalAddress(G->getGlobal(), PtrVT,
                                           G->getOffset() + Offset);
      } else if (ConstantPoolSDNode *CP =
                   dyn_cast<ConstantPoolSDNode>(N0.getOperand(0))) {
        C = CurDAG->getTargetConstantPool(CP->getConstVal(), PtrVT,
                                          CP->getAlignment(),
                                          CP->getOffset()+Offset);
      }

      if (C.Val) {
        if (Subtarget->is64Bit()) {
          SDValue Ops[] = { CurDAG->getRegister(0, PtrVT), getI8Imm(1),
                            CurDAG->getRegister(0, PtrVT), C };
          return CurDAG->SelectNodeTo(N.Val, X86::LEA64r, MVT::i64, Ops, 4);
        } else
          return CurDAG->SelectNodeTo(N.Val, X86::MOV32ri, PtrVT, C);
      }
    }

    // Other cases are handled by auto-generated code.
    break;
  }
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned)
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    else
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }

    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT()) {
    default: assert(0 && "Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }
    SDValue Tmp0, Tmp1, Tmp2, Tmp3;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    AddToISelQueue(N0);
    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), LoReg,
                                          N0, SDValue()).getValue(1);

    if (foldedLoad) {
      AddToISelQueue(N1.getOperand(0));
      AddToISelQueue(Tmp0);
      AddToISelQueue(Tmp1);
      AddToISelQueue(Tmp2);
      AddToISelQueue(Tmp3);
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
      SDNode *CNode =
        CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      AddToISelQueue(N1);
      InFlag =
        SDValue(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
    }
    // Copy the low half of the result, if it is needed.
    if (!N.getValue(0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.Val->dump(CurDAG));
      DOUT << "\n";
#endif
    }

    // Copy the high half of the result, if it is needed.
    if (!N.getValue(1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
                                    CurDAG->getTargetConstant(8, MVT::i8)), 0);
        // Then truncate it down to i8.
        SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
        Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
                                               MVT::i8, Result, SRIdx), 0);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.Val->dump(CurDAG));
      DOUT << "\n";
#endif
    }

#ifndef NDEBUG
    Indent -= 2;
#endif

    return NULL;
  }
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned)
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    else
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }

    unsigned LoReg, HiReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT()) {
    default: assert(0 && "Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  HiReg = X86::AH;
      ClrOpcode  = 0;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrOpcode  = X86::MOV16r0;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; HiReg = X86::EDX;
      ClrOpcode  = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; HiReg = X86::RDX;
      ClrOpcode  = X86::MOV64r0;
      SExtOpcode = X86::CQO;
      break;
    }
    SDValue Tmp0, Tmp1, Tmp2, Tmp3;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);

    SDValue InFlag;
    if (NVT == MVT::i8 && !isSigned) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Move, Chain;
      if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N0.getOperand(0) };
        AddToISelQueue(N0.getOperand(0));
        AddToISelQueue(Tmp0);
        AddToISelQueue(Tmp1);
        AddToISelQueue(Tmp2);
        AddToISelQueue(Tmp3);
        Move =
          SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, MVT::i16, MVT::Other,
                                        Ops, 5), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        AddToISelQueue(N0);
        Move =
          SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, MVT::i16, N0), 0);
        Chain = CurDAG->getEntryNode();
      }
      Chain = CurDAG->getCopyToReg(Chain, X86::AX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      AddToISelQueue(N0);
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(),
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getTargetNode(SExtOpcode, MVT::Flag, InFlag), 0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode = SDValue(CurDAG->getTargetNode(ClrOpcode, NVT), 0);
        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), HiReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }
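    // At this point the dividend is in AL/AX/EAX/RAX with the high half set
    // up; e.g. a 32-bit unsigned divide emits roughly "movl %lhs, %eax ;
    // xorl %edx, %edx ; divl %rhs", while the signed form uses "cdq ;
    // idivl %rhs" instead (illustrative).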
    if (foldedLoad) {
      AddToISelQueue(N1.getOperand(0));
      AddToISelQueue(Tmp0);
      AddToISelQueue(Tmp1);
      AddToISelQueue(Tmp2);
      AddToISelQueue(Tmp3);
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
      SDNode *CNode =
        CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      AddToISelQueue(N1);
      InFlag =
        SDValue(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
    }
    // Copy the division (low) result, if it is needed.
    if (!N.getValue(0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.Val->dump(CurDAG));
      DOUT << "\n";
#endif
    }

    // Copy the remainder (high) result, if it is needed.
    if (!N.getValue(1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
                                    CurDAG->getTargetConstant(8, MVT::i8)), 0);
        // Then truncate it down to i8.
        SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
        Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
                                               MVT::i8, Result, SRIdx), 0);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.Val->dump(CurDAG));
      DOUT << "\n";
#endif
    }

#ifndef NDEBUG
    Indent -= 2;
#endif

    return NULL;
  }
  case ISD::ANY_EXTEND: {
    // Check whether the type we are extending to supports subregs.
    if (NVT == MVT::i8)
      break;

    SDValue N0 = Node->getOperand(0);
    // Get the subregister index for the type we are extending from.
    MVT N0VT = N0.getValueType();
    unsigned Idx = (N0VT == MVT::i32) ? X86::SUBREG_32BIT :
                   (N0VT == MVT::i16) ? X86::SUBREG_16BIT :
                   (Subtarget->is64Bit()) ? X86::SUBREG_8BIT : 0;

    // If we don't have a subreg Idx, let the generated ISel have a try.
    if (Idx == 0)
      break;

    // If we have an index, generate an insert_subreg into undef.
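    // For example (illustrative), (i32 any_extend (i16 x)) becomes
    //   (insert_subreg (i32 implicit_def), x, SUBREG_16BIT)
    // leaving bits 16-31 undefined, which is exactly what any_extend allows.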
    AddToISelQueue(N0);
    SDValue Undef =
      SDValue(CurDAG->getTargetNode(X86::IMPLICIT_DEF, NVT), 0);
    SDValue SRIdx = CurDAG->getTargetConstant(Idx, MVT::i32);
    SDNode *ResNode = CurDAG->getTargetNode(X86::INSERT_SUBREG,
                                            NVT, Undef, N0, SRIdx);

#ifndef NDEBUG
    DOUT << std::string(Indent-2, ' ') << "=> ";
    DEBUG(ResNode->dump(CurDAG));
    DOUT << "\n";
    Indent -= 2;
#endif

    return ResNode;
  }
  case ISD::SIGN_EXTEND_INREG: {
    SDValue N0 = Node->getOperand(0);
    AddToISelQueue(N0);

    MVT SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
    SDValue TruncOp = SDValue(getTruncate(N0, SVT), 0);
    unsigned Opc = 0;
    switch (NVT.getSimpleVT()) {
    default: assert(0 && "Unknown sign_extend_inreg!");
    case MVT::i16:
      if (SVT == MVT::i8) Opc = X86::MOVSX16rr8;
      else assert(0 && "Unknown sign_extend_inreg!");
      break;
    case MVT::i32:
      switch (SVT.getSimpleVT()) {
      default: assert(0 && "Unknown sign_extend_inreg!");
      case MVT::i8:  Opc = X86::MOVSX32rr8;  break;
      case MVT::i16: Opc = X86::MOVSX32rr16; break;
      }
      break;
    case MVT::i64:
      switch (SVT.getSimpleVT()) {
      default: assert(0 && "Unknown sign_extend_inreg!");
      case MVT::i8:  Opc = X86::MOVSX64rr8;  break;
      case MVT::i16: Opc = X86::MOVSX64rr16; break;
      case MVT::i32: Opc = X86::MOVSX64rr32; break;
      }
      break;
    }

    SDNode *ResNode = CurDAG->getTargetNode(Opc, NVT, TruncOp);

#ifndef NDEBUG
    DOUT << std::string(Indent-2, ' ') << "=> ";
    DEBUG(TruncOp.Val->dump(CurDAG));
    DOUT << "\n";
    DOUT << std::string(Indent-2, ' ') << "=> ";
    DEBUG(ResNode->dump(CurDAG));
    DOUT << "\n";
    Indent -= 2;
#endif

    return ResNode;
  }
  case ISD::TRUNCATE: {
    SDValue Input = Node->getOperand(0);
    AddToISelQueue(Node->getOperand(0));
    SDNode *ResNode = getTruncate(Input, NVT);

#ifndef NDEBUG
    DOUT << std::string(Indent-2, ' ') << "=> ";
    DEBUG(ResNode->dump(CurDAG));
    DOUT << "\n";
    Indent -= 2;
#endif

    return ResNode;
  }
  case ISD::DECLARE: {
    // Handle DECLARE nodes here because the second operand may have been
    // wrapped in X86ISD::Wrapper.
    SDValue Chain = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
    SDValue N2 = Node->getOperand(2);
    if (!isa<FrameIndexSDNode>(N1))
      break;
    int FI = cast<FrameIndexSDNode>(N1)->getIndex();
    if (N2.getOpcode() == ISD::ADD &&
        N2.getOperand(0).getOpcode() == X86ISD::GlobalBaseReg)
      N2 = N2.getOperand(1);
    if (N2.getOpcode() == X86ISD::Wrapper &&
        isa<GlobalAddressSDNode>(N2.getOperand(0))) {
      GlobalValue *GV =
        cast<GlobalAddressSDNode>(N2.getOperand(0))->getGlobal();
      SDValue Tmp1 = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
      AddToISelQueue(Chain);
      SDValue Ops[] = { Tmp1, Tmp2, Chain };
      return CurDAG->getTargetNode(TargetInstrInfo::DECLARE,
                                   MVT::Other, Ops, 3);
    }
    break;
  }
  }
  SDNode *ResNode = SelectCode(N);

#ifndef NDEBUG
  DOUT << std::string(Indent-2, ' ') << "=> ";
  if (ResNode == NULL || ResNode == N.Val)
    DEBUG(N.Val->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DOUT << "\n";
  Indent -= 2;
#endif

  return ResNode;
}
bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps, SelectionDAG &DAG){
  SDValue Op0, Op1, Op2, Op3;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  AddToISelQueue(Op0);
  AddToISelQueue(Op1);
  AddToISelQueue(Op2);
  AddToISelQueue(Op3);
  return false;
}
/// createX86ISelDag - This pass converts a legalized DAG into an
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, bool Fast) {
  return new X86DAGToDAGISel(TM, Fast);
}