1 //===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a DAG pattern matching instruction selector for X86,
11 // converting from a legalized dag to an X86 dag.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "x86-isel"
17 #include "X86InstrBuilder.h"
18 #include "X86ISelLowering.h"
19 #include "X86MachineFunctionInfo.h"
20 #include "X86RegisterInfo.h"
21 #include "X86Subtarget.h"
22 #include "X86TargetMachine.h"
23 #include "llvm/GlobalValue.h"
24 #include "llvm/Instructions.h"
25 #include "llvm/Intrinsics.h"
26 #include "llvm/Support/CFG.h"
27 #include "llvm/Type.h"
28 #include "llvm/CodeGen/MachineConstantPool.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/CodeGen/SelectionDAGISel.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Compiler.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/MathExtras.h"
39 #include "llvm/ADT/Statistic.h"
44 STATISTIC(NumFPKill , "Number of FP_REG_KILL instructions added");
45 STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
49 FoldAndInTest("x86-fold-and-in-test", cl::desc("Fold and operation in test"),
50 cl::init(true), cl::Hidden);
53 //===----------------------------------------------------------------------===//
54 // Pattern Matcher Implementation
55 //===----------------------------------------------------------------------===//
58 /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
59 /// SDOperands instead of register numbers for the leaves of the matched tree.
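/// (Roughly, the matched form is the usual x86 effective address
///    BaseReg-or-FrameIndex + Scale*IndexReg + Disp [+ GV/CP/ES/JT],
/// with the base discriminated by BaseType and at most one of the
/// GlobalValue/ConstantPool/ExternalSymbol/JumpTable fields set.)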
61 struct X86ISelAddressMode {
67 struct { // This is really a union, discriminated by BaseType!
72 bool isRIPRel; // RIP as base?
80 unsigned Align; // CP alignment.
83 : BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
84 GV(0), CP(0), ES(0), JT(-1), Align(0) {
90 //===--------------------------------------------------------------------===//
91 /// ISel - X86 specific code to select X86 machine instructions for
92 /// SelectionDAG operations.
94 class VISIBILITY_HIDDEN X86DAGToDAGISel : public SelectionDAGISel {
95 /// ContainsFPCode - Every instruction we select that uses or defines a FP
96 /// register should set this to true.
99 /// FastISel - Enable fast(er) instruction selection.
103 /// TM - Keep a reference to X86TargetMachine.
105 X86TargetMachine &TM;
107 /// X86Lowering - This object fully describes how to lower LLVM code to an
108 /// X86-specific SelectionDAG.
109 X86TargetLowering X86Lowering;
111 /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
112 /// make the right decision when generating code for different targets.
113 const X86Subtarget *Subtarget;
115 /// GlobalBaseReg - keeps track of the virtual register mapped onto global
117 unsigned GlobalBaseReg;
120 X86DAGToDAGISel(X86TargetMachine &tm, bool fast)
121 : SelectionDAGISel(X86Lowering),
122 ContainsFPCode(false), FastISel(fast), TM(tm),
123 X86Lowering(*TM.getTargetLowering()),
124 Subtarget(&TM.getSubtarget<X86Subtarget>()) {}
126 virtual bool runOnFunction(Function &Fn) {
127 // Make sure we re-emit a set of the global base reg if necessary
129 return SelectionDAGISel::runOnFunction(Fn);
132 virtual const char *getPassName() const {
133 return "X86 DAG->DAG Instruction Selection";
136 /// InstructionSelectBasicBlock - This callback is invoked by
137 /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
138 virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
140 virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);
142 virtual bool CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const;
144 // Include the pieces autogenerated from the target description.
145 #include "X86GenDAGISel.inc"
148 SDNode *Select(SDOperand N);
150 bool MatchAddress(SDOperand N, X86ISelAddressMode &AM,
151 bool isRoot = true, unsigned Depth = 0);
152 bool MatchAddressBase(SDOperand N, X86ISelAddressMode &AM,
153 bool isRoot, unsigned Depth);
154 bool SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base,
155 SDOperand &Scale, SDOperand &Index, SDOperand &Disp);
156 bool SelectLEAAddr(SDOperand Op, SDOperand N, SDOperand &Base,
157 SDOperand &Scale, SDOperand &Index, SDOperand &Disp);
158 bool SelectScalarSSELoad(SDOperand Op, SDOperand Pred,
159 SDOperand N, SDOperand &Base, SDOperand &Scale,
160 SDOperand &Index, SDOperand &Disp,
161 SDOperand &InChain, SDOperand &OutChain);
162 bool TryFoldLoad(SDOperand P, SDOperand N,
163 SDOperand &Base, SDOperand &Scale,
164 SDOperand &Index, SDOperand &Disp);
165 void PreprocessForRMW(SelectionDAG &DAG);
166 void PreprocessForFPConvert(SelectionDAG &DAG);
168 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
169 /// inline asm expressions.
170 virtual bool SelectInlineAsmMemoryOperand(const SDOperand &Op,
172 std::vector<SDOperand> &OutOps,
175 void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
177 inline void getAddressOperands(X86ISelAddressMode &AM, SDOperand &Base,
178 SDOperand &Scale, SDOperand &Index,
180 Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
181 CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
183 Scale = getI8Imm(AM.Scale);
185 // These are 32-bit even in 64-bit mode since RIP relative offset is 32-bit.
188 Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp);
190 Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Align, AM.Disp);
192 Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32);
193 else if (AM.JT != -1)
194 Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32);
196 Disp = getI32Imm(AM.Disp);
199 /// getI8Imm - Return a target constant with the specified value, of type i8.
201 inline SDOperand getI8Imm(unsigned Imm) {
202 return CurDAG->getTargetConstant(Imm, MVT::i8);
205 /// getI16Imm - Return a target constant with the specified value, of type i16.
207 inline SDOperand getI16Imm(unsigned Imm) {
208 return CurDAG->getTargetConstant(Imm, MVT::i16);
211 /// getI32Imm - Return a target constant with the specified value, of type i32.
213 inline SDOperand getI32Imm(unsigned Imm) {
214 return CurDAG->getTargetConstant(Imm, MVT::i32);
217 /// getGlobalBaseReg - insert code into the entry mbb to materialize the PIC
218 /// base register. Return the virtual register that holds this value.
219 SDNode *getGlobalBaseReg();
221 /// getTruncate - return an SDNode that implements a subreg based truncate
222 /// of the specified operand to the specified value type.
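/// For example, truncating an i32 value to i8 is done by extracting the low
/// 8-bit subregister (AL from EAX) with an EXTRACT_SUBREG node; on 32-bit
/// targets an extra register-class copy is emitted first so the source is
/// guaranteed to have an 8-bit subregister.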
223 SDNode *getTruncate(SDOperand N0, MVT::ValueType VT);
231 static SDNode *findFlagUse(SDNode *N) {
232 unsigned FlagResNo = N->getNumValues()-1;
233 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
235 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
236 SDOperand Op = User->getOperand(i);
237 if (Op.Val == N && Op.ResNo == FlagResNo)
244 static void findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
245 SDNode *Root, SDNode *Skip, bool &found,
246 std::set<SDNode *> &Visited) {
248 Use->getNodeId() > Def->getNodeId() ||
249 !Visited.insert(Use).second)
252 for (unsigned i = 0, e = Use->getNumOperands(); !found && i != e; ++i) {
253 SDNode *N = Use->getOperand(i).Val;
258 continue; // Immediate use is ok.
260 assert(Use->getOpcode() == ISD::STORE ||
261 Use->getOpcode() == X86ISD::CMP);
267 findNonImmUse(N, Def, ImmedUse, Root, Skip, found, Visited);
271 /// isNonImmUse - Start searching from Root up the DAG to check if Def can
272 /// be reached. Return true if that's the case. However, ignore direct uses
273 /// by ImmedUse (which would be U in the example illustrated in
274 /// CanBeFoldedBy) and by Root (which can happen in the store case).
275 /// FIXME: to be really generic, we should allow direct use by any node
276 /// that is being folded. But realistically since we only fold loads which
277 /// have one non-chain use, we only need to watch out for load/op/store
278 /// and load/op/cmp case where the root (store / cmp) may reach the load via
279 /// its chain operand.
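/// For example, when folding a load L into (store (add L, x), ptr), the store
/// may reach L through its chain operand; that path is expected and is what
/// the ImmedUse/Root exceptions skip. What must be rejected is x, a non-chain
/// path, also reaching L, since folding L would then make the combined node a
/// transitive predecessor of itself.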
280 static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
281 SDNode *Skip = NULL) {
282 std::set<SDNode *> Visited;
284 findNonImmUse(Root, Def, ImmedUse, Root, Skip, found, Visited);
289 bool X86DAGToDAGISel::CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const {
290 if (FastISel) return false;
292 // If U can somehow reach N through another path, then U can't fold N or
293 // it will create a cycle: e.g. if U also reaches N
294 // through some other node X, then after folding N into U, X is both a predecessor and a successor of U.
305 if (isNonImmUse(Root, N, U))
308 // If U produces a flag, then it gets (even more) interesting. Since it
309 // would have been "glued" together with its flag use, we need to check if
322 // If FU (flag use) indirectly reach N (the load), and U fold N (call it
323 // NU), then TF is a predecessor of FU and a successor of NU. But since
324 // NU and FU are flagged together, this effectively creates a cycle.
325 bool HasFlagUse = false;
326 MVT::ValueType VT = Root->getValueType(Root->getNumValues()-1);
327 while ((VT == MVT::Flag && !Root->use_empty())) {
328 SDNode *FU = findFlagUse(Root);
335 VT = Root->getValueType(Root->getNumValues()-1);
339 return !isNonImmUse(Root, N, Root, U);
343 /// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain operand
344 /// and move load below the TokenFactor. Replace store's chain operand with
345 /// load's chain result.
346 static void MoveBelowTokenFactor(SelectionDAG &DAG, SDOperand Load,
347 SDOperand Store, SDOperand TF) {
348 std::vector<SDOperand> Ops;
349 for (unsigned i = 0, e = TF.Val->getNumOperands(); i != e; ++i)
350 if (Load.Val == TF.Val->getOperand(i).Val)
351 Ops.push_back(Load.Val->getOperand(0));
353 Ops.push_back(TF.Val->getOperand(i));
354 DAG.UpdateNodeOperands(TF, &Ops[0], Ops.size());
355 DAG.UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
356 DAG.UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
357 Store.getOperand(2), Store.getOperand(3));
360 /// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
361 /// This is only run if not in -fast mode (aka -O0).
362 /// This allows the instruction selector to pick more read-modify-write
363 /// instructions. This is a common case:
373 /// (diagram: the load feeds both a TokenFactor and the Op, and the store uses that TokenFactor as its chain and the Op as its value)
380 /// The fact that the store's chain operand != load's chain will prevent the
381 /// (store (op (load))) instruction from being selected. We can transform it to:
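/// a DAG where the TokenFactor uses the load's old chain, the load chains off
/// the TokenFactor, and the store chains directly off the load (this is what
/// MoveBelowTokenFactor above does). For IR like "*p = *p + x", i.e.
/// (store (add (load p), x), p), that rewiring is what lets isel pick a
/// single memory-destination add instead of separate load/add/store.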
400 void X86DAGToDAGISel::PreprocessForRMW(SelectionDAG &DAG) {
401 for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
402 E = DAG.allnodes_end(); I != E; ++I) {
403 if (!ISD::isNON_TRUNCStore(I))
405 SDOperand Chain = I->getOperand(0);
406 if (Chain.Val->getOpcode() != ISD::TokenFactor)
409 SDOperand N1 = I->getOperand(1);
410 SDOperand N2 = I->getOperand(2);
411 if (MVT::isFloatingPoint(N1.getValueType()) ||
412 MVT::isVector(N1.getValueType()) ||
418 unsigned Opcode = N1.Val->getOpcode();
427 SDOperand N10 = N1.getOperand(0);
428 SDOperand N11 = N1.getOperand(1);
429 if (ISD::isNON_EXTLoad(N10.Val))
431 else if (ISD::isNON_EXTLoad(N11.Val)) {
435 RModW = RModW && N10.Val->isOperand(Chain.Val) && N10.hasOneUse() &&
436 (N10.getOperand(1) == N2) &&
437 (N10.Val->getValueType(0) == N1.getValueType());
452 SDOperand N10 = N1.getOperand(0);
453 if (ISD::isNON_EXTLoad(N10.Val))
454 RModW = N10.Val->isOperand(Chain.Val) && N10.hasOneUse() &&
455 (N10.getOperand(1) == N2) &&
456 (N10.Val->getValueType(0) == N1.getValueType());
464 MoveBelowTokenFactor(DAG, Load, SDOperand(I, 0), Chain);
471 /// PreprocessForFPConvert - Walk over the dag, lowering fpround and fpextend
472 /// nodes that target the FP stack into a store and a load through a stack slot. This is a
473 /// gross hack. We would like to simply mark these as being illegal, but when
474 /// we do that, legalize produces these when it expands calls, then expands
475 /// these in the same legalize pass. We would like dag combine to be able to
476 /// hack on these between the call expansion and the node legalization. As such
477 /// this pass basically does "really late" legalization of these inline with the X86 isel pass.
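/// For example, an f64-to-f32 fp_round whose operand lives on the x87 stack
/// is rewritten below into a truncating f32 store to a fresh stack temporary
/// followed by a load of that slot, which the FP stack instructions can
/// implement directly.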
479 void X86DAGToDAGISel::PreprocessForFPConvert(SelectionDAG &DAG) {
480 for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
481 E = DAG.allnodes_end(); I != E; ) {
482 SDNode *N = I++; // Preincrement iterator to avoid invalidation issues.
483 if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
486 // If the source and destination are SSE registers, then this is a legal
487 // conversion that should not be lowered.
488 MVT::ValueType SrcVT = N->getOperand(0).getValueType();
489 MVT::ValueType DstVT = N->getValueType(0);
490 bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
491 bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
492 if (SrcIsSSE && DstIsSSE)
495 // If this is an FPStack extension (but not a truncation), it is a noop.
496 if (!SrcIsSSE && !DstIsSSE && N->getOpcode() == ISD::FP_EXTEND)
499 // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
500 // FPStack has extload and truncstore. SSE can fold direct loads into other
501 // operations. Based on this, decide what we want to do.
502 MVT::ValueType MemVT;
503 if (N->getOpcode() == ISD::FP_ROUND)
504 MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
506 MemVT = SrcIsSSE ? SrcVT : DstVT;
508 SDOperand MemTmp = DAG.CreateStackTemporary(MemVT);
510 // FIXME: optimize the case where the src/dest is a load or store?
511 SDOperand Store = DAG.getTruncStore(DAG.getEntryNode(), N->getOperand(0),
512 MemTmp, NULL, 0, MemVT);
513 SDOperand Result = DAG.getExtLoad(ISD::EXTLOAD, DstVT, Store, MemTmp,
516 // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
517 // extload we created. This will cause general havoc on the dag because
518 // anything below the conversion could be folded into other existing nodes.
519 // To avoid invalidating 'I', back it up to the convert node.
521 DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Result);
523 // Now that we did that, the node is dead. Increment the iterator to the
524 // next node to process, then delete N.
530 /// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
531 /// when it has created a SelectionDAG for us to codegen.
532 void X86DAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
534 MachineFunction::iterator FirstMBB = BB;
537 PreprocessForRMW(DAG);
539 // FIXME: This should only happen when not -fast.
540 PreprocessForFPConvert(DAG);
542 // Codegen the basic block.
544 DOUT << "===== Instruction selection begins:\n";
547 DAG.setRoot(SelectRoot(DAG.getRoot()));
549 DOUT << "===== Instruction selection ends:\n";
552 DAG.RemoveDeadNodes();
554 // Emit machine code to BB.
555 ScheduleAndEmitDAG(DAG);
557 // If we are emitting FP stack code, scan the basic block to determine if this
558 // block defines any FP values. If so, put an FP_REG_KILL instruction before
559 // the terminator of the block.
561 // Note that FP stack instructions are used in all modes for long double,
562 // so we always need to do this check.
563 // Also note that it's possible for an FP stack register to be live across
564 // an instruction that produces multiple basic blocks (SSE CMOV) so we
565 // must check all the generated basic blocks.
567 // Scan all of the machine instructions in these MBBs, checking for FP
568 // stores. (RFP32 and RFP64 will not exist in SSE mode, but RFP80 might.)
569 MachineFunction::iterator MBBI = FirstMBB;
571 bool ContainsFPCode = false;
572 for (MachineBasicBlock::iterator I = MBBI->begin(), E = MBBI->end();
573 !ContainsFPCode && I != E; ++I) {
574 if (I->getNumOperands() != 0 && I->getOperand(0).isRegister()) {
575 const TargetRegisterClass *clas;
576 for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
577 if (I->getOperand(op).isRegister() && I->getOperand(op).isDef() &&
578 TargetRegisterInfo::isVirtualRegister(I->getOperand(op).getReg()) &&
579 ((clas = RegInfo->getRegClass(I->getOperand(op).getReg())) ==
580 X86::RFP32RegisterClass ||
581 clas == X86::RFP64RegisterClass ||
582 clas == X86::RFP80RegisterClass)) {
583 ContainsFPCode = true;
589 // Check PHI nodes in successor blocks. These PHI's will be lowered to have
590 // a copy of the input value in this block. In SSE mode, we only care about
592 if (!ContainsFPCode) {
593 // Final check, check LLVM BB's that are successors to the LLVM BB
594 // corresponding to BB for FP PHI nodes.
595 const BasicBlock *LLVMBB = BB->getBasicBlock();
597 for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
598 !ContainsFPCode && SI != E; ++SI) {
599 for (BasicBlock::const_iterator II = SI->begin();
600 (PN = dyn_cast<PHINode>(II)); ++II) {
601 if (PN->getType()==Type::X86_FP80Ty ||
602 (!Subtarget->hasSSE1() && PN->getType()->isFloatingPoint()) ||
603 (!Subtarget->hasSSE2() && PN->getType()==Type::DoubleTy)) {
604 ContainsFPCode = true;
610 // Finally, if we found any FP code, emit the FP_REG_KILL instruction.
611 if (ContainsFPCode) {
612 BuildMI(*MBBI, MBBI->getFirstTerminator(),
613 TM.getInstrInfo()->get(X86::FP_REG_KILL));
616 } while (&*(MBBI++) != BB);
619 /// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
620 /// the main function.
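/// On Cygwin/MinGW targets this currently just emits a call to __main, the
/// gcc runtime hook that (among other things) runs static constructors before
/// the body of main executes.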
621 void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
622 MachineFrameInfo *MFI) {
623 const TargetInstrInfo *TII = TM.getInstrInfo();
624 if (Subtarget->isTargetCygMing())
625 BuildMI(BB, TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
628 void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
629 // If this is main, emit special code for main.
630 MachineBasicBlock *BB = MF.begin();
631 if (Fn.hasExternalLinkage() && Fn.getName() == "main")
632 EmitSpecialCodeForMain(BB, MF.getFrameInfo());
635 /// MatchAddress - Add the specified node to the specified addressing mode,
636 /// returning true if it cannot be done. This just pattern matches for the addressing mode.
638 bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
639 bool isRoot, unsigned Depth) {
642 return MatchAddressBase(N, AM, isRoot, Depth);
644 // RIP relative addressing: %rip + 32-bit displacement!
646 if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
647 int64_t Val = cast<ConstantSDNode>(N)->getSignExtended();
648 if (isInt32(AM.Disp + Val)) {
656 int id = N.Val->getNodeId();
657 bool AlreadySelected = isSelected(id); // Already selected, not yet replaced.
659 switch (N.getOpcode()) {
661 case ISD::Constant: {
662 int64_t Val = cast<ConstantSDNode>(N)->getSignExtended();
663 if (isInt32(AM.Disp + Val)) {
670 case X86ISD::Wrapper: {
671 bool is64Bit = Subtarget->is64Bit();
672 // Under X86-64 non-small code model, GV (and friends) are 64-bits.
673 // Also, base and index reg must be 0 in order to use rip as base.
674 if (is64Bit && (TM.getCodeModel() != CodeModel::Small ||
675 AM.Base.Reg.Val || AM.IndexReg.Val))
677 if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
679 // If the value has not already been selected into a register, or if both the
680 // base and index components have been picked so the register cannot be used here,
681 // duplicate the GlobalAddress or ConstantPool into the addressing mode as a displacement.
682 if (!AlreadySelected || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
683 SDOperand N0 = N.getOperand(0);
684 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
685 GlobalValue *GV = G->getGlobal();
687 AM.Disp += G->getOffset();
688 AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
689 Subtarget->isPICStyleRIPRel();
691 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
692 AM.CP = CP->getConstVal();
693 AM.Align = CP->getAlignment();
694 AM.Disp += CP->getOffset();
695 AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
696 Subtarget->isPICStyleRIPRel();
698 } else if (ExternalSymbolSDNode *S =dyn_cast<ExternalSymbolSDNode>(N0)) {
699 AM.ES = S->getSymbol();
700 AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
701 Subtarget->isPICStyleRIPRel();
703 } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
704 AM.JT = J->getIndex();
705 AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
706 Subtarget->isPICStyleRIPRel();
713 case ISD::FrameIndex:
714 if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base.Reg.Val == 0) {
715 AM.BaseType = X86ISelAddressMode::FrameIndexBase;
716 AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
722 if (AlreadySelected || AM.IndexReg.Val != 0 || AM.Scale != 1 || AM.isRIPRel)
725 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
726 unsigned Val = CN->getValue();
727 if (Val == 1 || Val == 2 || Val == 3) {
729 SDOperand ShVal = N.Val->getOperand(0);
731 // Okay, we know that we have a scale by now. However, if the scaled
732 // value is an add of something and a constant, we can fold the
733 // constant into the disp field here.
734 if (ShVal.Val->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
735 isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
736 AM.IndexReg = ShVal.Val->getOperand(0);
737 ConstantSDNode *AddVal =
738 cast<ConstantSDNode>(ShVal.Val->getOperand(1));
739 uint64_t Disp = AM.Disp + (AddVal->getValue() << Val);
754 // A mul_lohi where we need the low part can be folded as a plain multiply.
755 if (N.ResNo != 0) break;
758 // X*[3,5,9] -> X+X*[2,4,8]
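// e.g. x*5 is matched below as x + x*4, i.e. an addressing mode with
// base = x, index = x, scale = 4, which a single LEA such as
// leal (%reg,%reg,4), %dst can compute.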
759 if (!AlreadySelected &&
760 AM.BaseType == X86ISelAddressMode::RegBase &&
761 AM.Base.Reg.Val == 0 &&
762 AM.IndexReg.Val == 0 &&
764 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
765 if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
766 AM.Scale = unsigned(CN->getValue())-1;
768 SDOperand MulVal = N.Val->getOperand(0);
771 // Okay, we know that we have a scale by now. However, if the scaled
772 // value is an add of something and a constant, we can fold the
773 // constant into the disp field here.
774 if (MulVal.Val->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
775 isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
776 Reg = MulVal.Val->getOperand(0);
777 ConstantSDNode *AddVal =
778 cast<ConstantSDNode>(MulVal.Val->getOperand(1));
779 uint64_t Disp = AM.Disp + AddVal->getValue() * CN->getValue();
783 Reg = N.Val->getOperand(0);
785 Reg = N.Val->getOperand(0);
788 AM.IndexReg = AM.Base.Reg = Reg;
795 if (!AlreadySelected) {
796 X86ISelAddressMode Backup = AM;
797 if (!MatchAddress(N.Val->getOperand(0), AM, false, Depth+1) &&
798 !MatchAddress(N.Val->getOperand(1), AM, false, Depth+1))
801 if (!MatchAddress(N.Val->getOperand(1), AM, false, Depth+1) &&
802 !MatchAddress(N.Val->getOperand(0), AM, false, Depth+1))
809 // Handle "X | C" as "X + C" iff X is known to have C bits clear.
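// e.g. if X is known to be a multiple of 8, then "X | 7" has the same value
// as "X + 7", so the constant can simply go into the displacement field.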
810 if (AlreadySelected) break;
812 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
813 X86ISelAddressMode Backup = AM;
814 // Start with the LHS as an addr mode.
815 if (!MatchAddress(N.getOperand(0), AM, false) &&
816 // Address could not have picked a GV address for the displacement.
818 // On x86-64, the resultant disp must fit in 32-bits.
819 isInt32(AM.Disp + CN->getSignExtended()) &&
820 // Check to see if the LHS & C is zero.
821 CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getValue())) {
822 AM.Disp += CN->getValue();
830 // Handle "(x << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
831 // allows us to fold the shift into this addressing mode.
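// e.g. (x << 2) & 0x3FC becomes (x & 0xFF) << 2, so the shift amount can be
// absorbed into the address scale: index = x & 0xFF, scale = 4.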
832 if (AlreadySelected) break;
833 SDOperand Shift = N.getOperand(0);
834 if (Shift.getOpcode() != ISD::SHL) break;
836 // Scale must not be used already.
837 if (AM.IndexReg.Val != 0 || AM.Scale != 1) break;
839 // Not when RIP is used as the base.
840 if (AM.isRIPRel) break;
842 ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
843 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
844 if (!C1 || !C2) break;
846 // Not likely to be profitable if either the AND or SHIFT node has more
847 // than one use (unless all uses are for address computation). Besides,
848 // isel mechanism requires their node ids to be reused.
849 if (!N.hasOneUse() || !Shift.hasOneUse())
852 // Verify that the shift amount is something we can fold.
853 unsigned ShiftCst = C1->getValue();
854 if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
857 // Get the new AND mask, this folds to a constant.
858 SDOperand NewANDMask = CurDAG->getNode(ISD::SRL, N.getValueType(),
859 SDOperand(C2, 0), SDOperand(C1, 0));
860 SDOperand NewAND = CurDAG->getNode(ISD::AND, N.getValueType(),
861 Shift.getOperand(0), NewANDMask);
862 NewANDMask.Val->setNodeId(Shift.Val->getNodeId());
863 NewAND.Val->setNodeId(N.Val->getNodeId());
865 AM.Scale = 1 << ShiftCst;
866 AM.IndexReg = NewAND;
871 return MatchAddressBase(N, AM, isRoot, Depth);
874 /// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
875 /// specified addressing mode without any further recursion.
876 bool X86DAGToDAGISel::MatchAddressBase(SDOperand N, X86ISelAddressMode &AM,
877 bool isRoot, unsigned Depth) {
878 // Is the base register already occupied?
879 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) {
880 // If so, check to see if the scale index register is set.
881 if (AM.IndexReg.Val == 0 && !AM.isRIPRel) {
887 // Otherwise, we cannot select it.
891 // Default, generate it as a register.
892 AM.BaseType = X86ISelAddressMode::RegBase;
897 /// SelectAddr - returns true if it is able to pattern match an addressing mode.
898 /// It returns the operands which make up the maximal addressing mode it can
899 /// match by reference.
900 bool X86DAGToDAGISel::SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base,
901 SDOperand &Scale, SDOperand &Index,
903 X86ISelAddressMode AM;
904 if (MatchAddress(N, AM))
907 MVT::ValueType VT = N.getValueType();
908 if (AM.BaseType == X86ISelAddressMode::RegBase) {
909 if (!AM.Base.Reg.Val)
910 AM.Base.Reg = CurDAG->getRegister(0, VT);
913 if (!AM.IndexReg.Val)
914 AM.IndexReg = CurDAG->getRegister(0, VT);
916 getAddressOperands(AM, Base, Scale, Index, Disp);
920 /// isZeroNode - Returns true if Elt is a constant zero or a floating point constant +0.0.
922 static inline bool isZeroNode(SDOperand Elt) {
923 return ((isa<ConstantSDNode>(Elt) &&
924 cast<ConstantSDNode>(Elt)->getValue() == 0) ||
925 (isa<ConstantFPSDNode>(Elt) &&
926 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
930 /// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
931 /// match a load whose top elements are either undef or zeros. The load flavor
932 /// is derived from the type of N, which is either v4f32 or v2f64.
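/// For example, (v4f32 (scalar_to_vector (load addr))) can fold the load and
/// be selected as a scalar SSE load from addr (a MOVSS-style load, roughly
/// speaking), since the upper elements are undef or required to be zero.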
933 bool X86DAGToDAGISel::SelectScalarSSELoad(SDOperand Op, SDOperand Pred,
934 SDOperand N, SDOperand &Base,
935 SDOperand &Scale, SDOperand &Index,
936 SDOperand &Disp, SDOperand &InChain,
937 SDOperand &OutChain) {
938 if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
939 InChain = N.getOperand(0).getValue(1);
940 if (ISD::isNON_EXTLoad(InChain.Val) &&
941 InChain.getValue(0).hasOneUse() &&
943 CanBeFoldedBy(N.Val, Pred.Val, Op.Val)) {
944 LoadSDNode *LD = cast<LoadSDNode>(InChain);
945 if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
947 OutChain = LD->getChain();
952 // Also handle the case where we explicitly require zeros in the top
953 // elements. This is a vector shuffle from the zero vector.
954 if (N.getOpcode() == ISD::VECTOR_SHUFFLE && N.Val->hasOneUse() &&
955 // Check to see if the top elements are all zeros (or bitcast of zeros).
956 ISD::isBuildVectorAllZeros(N.getOperand(0).Val) &&
957 N.getOperand(1).getOpcode() == ISD::SCALAR_TO_VECTOR &&
958 N.getOperand(1).Val->hasOneUse() &&
959 ISD::isNON_EXTLoad(N.getOperand(1).getOperand(0).Val) &&
960 N.getOperand(1).getOperand(0).hasOneUse()) {
961 // Check to see if the shuffle mask is 4/L/L/L or 2/L, where L is something
963 unsigned VecWidth=MVT::getVectorNumElements(N.getOperand(0).getValueType());
964 SDOperand ShufMask = N.getOperand(2);
965 assert(ShufMask.getOpcode() == ISD::BUILD_VECTOR && "Invalid shuf mask!");
966 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(ShufMask.getOperand(0))) {
967 if (C->getValue() == VecWidth) {
968 for (unsigned i = 1; i != VecWidth; ++i) {
969 if (ShufMask.getOperand(i).getOpcode() == ISD::UNDEF) {
972 ConstantSDNode *C = cast<ConstantSDNode>(ShufMask.getOperand(i));
973 if (C->getValue() >= VecWidth) return false;
978 // Okay, this is a zero extending load. Fold it.
979 LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(1).getOperand(0));
980 if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
982 OutChain = LD->getChain();
983 InChain = SDOperand(LD, 1);
991 /// SelectLEAAddr - It calls SelectAddr and determines if the maximal addressing
992 /// mode it matches can be cost-effectively emitted as an LEA instruction.
993 bool X86DAGToDAGISel::SelectLEAAddr(SDOperand Op, SDOperand N,
994 SDOperand &Base, SDOperand &Scale,
995 SDOperand &Index, SDOperand &Disp) {
996 X86ISelAddressMode AM;
997 if (MatchAddress(N, AM))
1000 MVT::ValueType VT = N.getValueType();
1001 unsigned Complexity = 0;
1002 if (AM.BaseType == X86ISelAddressMode::RegBase)
1003 if (AM.Base.Reg.Val)
1006 AM.Base.Reg = CurDAG->getRegister(0, VT);
1007 else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
1010 if (AM.IndexReg.Val)
1013 AM.IndexReg = CurDAG->getRegister(0, VT);
1015 // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with a simple shift.
1020 // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
1021 // to a LEA. This is determined with some experimentation but is by no means
1022 // optimal (especially for code size consideration). LEA is nice because of
1023 // its three-address nature. Tweak the cost function again when we can run
1024 // convertToThreeAddress() at register allocation time.
1025 if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
1026 // For X86-64, we should always use lea to materialize RIP relative addresses.
1028 if (Subtarget->is64Bit())
1034 if (AM.Disp && (AM.Base.Reg.Val || AM.IndexReg.Val))
1037 if (Complexity > 2) {
1038 getAddressOperands(AM, Base, Scale, Index, Disp);
1044 bool X86DAGToDAGISel::TryFoldLoad(SDOperand P, SDOperand N,
1045 SDOperand &Base, SDOperand &Scale,
1046 SDOperand &Index, SDOperand &Disp) {
1047 if (ISD::isNON_EXTLoad(N.Val) &&
1049 CanBeFoldedBy(N.Val, P.Val, P.Val))
1050 return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp);
1054 /// getGlobalBaseReg - Output the instructions required to put the
1055 /// base address to use for accessing globals into a register.
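/// The sequence built below is a MOVPC32r pseudo that materializes the
/// current PC into a virtual register (roughly a call-to-next-instruction
/// plus pop when printed), followed, for GOT-style PIC, by an ADD32ri of the
/// _GLOBAL_OFFSET_TABLE_ symbol so the register ends up pointing at the GOT.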
1057 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
1058 assert(!Subtarget->is64Bit() && "X86-64 PIC uses RIP relative addressing");
1059 if (!GlobalBaseReg) {
1060 // Insert the set of GlobalBaseReg into the first MBB of the function
1061 MachineFunction *MF = BB->getParent();
1062 MachineBasicBlock &FirstMBB = MF->front();
1063 MachineBasicBlock::iterator MBBI = FirstMBB.begin();
1064 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1065 unsigned PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
1067 const TargetInstrInfo *TII = TM.getInstrInfo();
1068 // Operand of MovePCtoStack is completely ignored by asm printer. It's
1069 // only used in JIT code emission as displacement to pc.
1070 BuildMI(FirstMBB, MBBI, TII->get(X86::MOVPC32r), PC).addImm(0);
1072 // If we're using vanilla 'GOT' PIC style, we should use relative addressing
1073 // not to pc, but to the _GLOBAL_OFFSET_TABLE_ external symbol.
1074 if (TM.getRelocationModel() == Reloc::PIC_ &&
1075 Subtarget->isPICStyleGOT()) {
1076 GlobalBaseReg = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
1077 BuildMI(FirstMBB, MBBI, TII->get(X86::ADD32ri), GlobalBaseReg)
1078 .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_");
1084 return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).Val;
1087 static SDNode *FindCallStartFromCall(SDNode *Node) {
1088 if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
1089 assert(Node->getOperand(0).getValueType() == MVT::Other &&
1090 "Node doesn't have a token chain argument!");
1091 return FindCallStartFromCall(Node->getOperand(0).Val);
1094 SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT::ValueType VT) {
1098 SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
1099 // Ensure that the source register has an 8-bit subreg on 32-bit targets
1100 if (!Subtarget->is64Bit()) {
1103 switch (N0.getValueType()) {
1104 default: assert(0 && "Unknown truncate!");
1106 Opc = X86::MOV16to16_;
1110 Opc = X86::MOV32to32_;
1114 N0 = SDOperand(CurDAG->getTargetNode(Opc, VT, MVT::Flag, N0), 0);
1115 return CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
1116 VT, N0, SRIdx, N0.getValue(1));
1120 SRIdx = CurDAG->getTargetConstant(2, MVT::i32); // SubRegSet 2
1123 SRIdx = CurDAG->getTargetConstant(3, MVT::i32); // SubRegSet 3
1125 default: assert(0 && "Unknown truncate!"); break;
1127 return CurDAG->getTargetNode(X86::EXTRACT_SUBREG, VT, N0, SRIdx);
1131 SDNode *X86DAGToDAGISel::Select(SDOperand N) {
1132 SDNode *Node = N.Val;
1133 MVT::ValueType NVT = Node->getValueType(0);
1135 unsigned Opcode = Node->getOpcode();
1138 DOUT << std::string(Indent, ' ') << "Selecting: ";
1139 DEBUG(Node->dump(CurDAG));
1144 if (Opcode >= ISD::BUILTIN_OP_END && Opcode < X86ISD::FIRST_NUMBER) {
1146 DOUT << std::string(Indent-2, ' ') << "== ";
1147 DEBUG(Node->dump(CurDAG));
1151 return NULL; // Already selected.
1156 case X86ISD::GlobalBaseReg:
1157 return getGlobalBaseReg();
1159 case X86ISD::FP_GET_RESULT2: {
1160 SDOperand Chain = N.getOperand(0);
1161 SDOperand InFlag = N.getOperand(1);
1162 AddToISelQueue(Chain);
1163 AddToISelQueue(InFlag);
1164 std::vector<MVT::ValueType> Tys;
1165 Tys.push_back(MVT::f80);
1166 Tys.push_back(MVT::f80);
1167 Tys.push_back(MVT::Other);
1168 Tys.push_back(MVT::Flag);
1169 SDOperand Ops[] = { Chain, InFlag };
1170 SDNode *ResNode = CurDAG->getTargetNode(X86::FpGETRESULT80x2, Tys,
1172 Chain = SDOperand(ResNode, 2);
1173 InFlag = SDOperand(ResNode, 3);
1174 ReplaceUses(SDOperand(N.Val, 2), Chain);
1175 ReplaceUses(SDOperand(N.Val, 3), InFlag);
1180 // Turn ADD X, c to MOV32ri X+c. This cannot be done with tblgen'd
1181 // code and is matched first so as to prevent it from being turned into an LEA.
1183 // In 64-bit small code size mode, use LEA to take advantage of
1184 // RIP-relative addressing.
1185 if (TM.getCodeModel() != CodeModel::Small)
1187 MVT::ValueType PtrVT = TLI.getPointerTy();
1188 SDOperand N0 = N.getOperand(0);
1189 SDOperand N1 = N.getOperand(1);
1190 if (N.Val->getValueType(0) == PtrVT &&
1191 N0.getOpcode() == X86ISD::Wrapper &&
1192 N1.getOpcode() == ISD::Constant) {
1193 unsigned Offset = (unsigned)cast<ConstantSDNode>(N1)->getValue();
1195 // TODO: handle ExternalSymbolSDNode.
1196 if (GlobalAddressSDNode *G =
1197 dyn_cast<GlobalAddressSDNode>(N0.getOperand(0))) {
1198 C = CurDAG->getTargetGlobalAddress(G->getGlobal(), PtrVT,
1199 G->getOffset() + Offset);
1200 } else if (ConstantPoolSDNode *CP =
1201 dyn_cast<ConstantPoolSDNode>(N0.getOperand(0))) {
1202 C = CurDAG->getTargetConstantPool(CP->getConstVal(), PtrVT,
1204 CP->getOffset()+Offset);
1208 if (Subtarget->is64Bit()) {
1209 SDOperand Ops[] = { CurDAG->getRegister(0, PtrVT), getI8Imm(1),
1210 CurDAG->getRegister(0, PtrVT), C };
1211 return CurDAG->SelectNodeTo(N.Val, X86::LEA64r, MVT::i64, Ops, 4);
1213 return CurDAG->SelectNodeTo(N.Val, X86::MOV32ri, PtrVT, C);
1217 // Other cases are handled by auto-generated code.
1221 case ISD::SMUL_LOHI:
1222 case ISD::UMUL_LOHI: {
1223 SDOperand N0 = Node->getOperand(0);
1224 SDOperand N1 = Node->getOperand(1);
1226 // There are several forms of IMUL that just return the low part and
1227 // don't have fixed-register operands. If we don't need the high part,
1228 // use these instead. They can be selected with the generated ISel code.
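// For example, a 32-bit [su]mul_lohi whose high half is unused is rebuilt
// here as a plain ISD::MUL and left to the generated matcher, which can pick
// a two-operand IMUL form instead of the fixed EAX/EDX one.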
1229 if (NVT != MVT::i8 &&
1230 N.getValue(1).use_empty()) {
1231 N = CurDAG->getNode(ISD::MUL, NVT, N0, N1);
1235 bool isSigned = Opcode == ISD::SMUL_LOHI;
1238 default: assert(0 && "Unsupported VT!");
1239 case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
1240 case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
1241 case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
1242 case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
1246 default: assert(0 && "Unsupported VT!");
1247 case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
1248 case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
1249 case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
1250 case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
1253 unsigned LoReg, HiReg;
1255 default: assert(0 && "Unsupported VT!");
1256 case MVT::i8: LoReg = X86::AL; HiReg = X86::AH; break;
1257 case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; break;
1258 case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
1259 case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
1262 SDOperand Tmp0, Tmp1, Tmp2, Tmp3;
1263 bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
1264 // multiply is commutative
1266 foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3);
1272 SDOperand InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), LoReg,
1273 N0, SDOperand()).getValue(1);
1276 AddToISelQueue(N1.getOperand(0));
1277 AddToISelQueue(Tmp0);
1278 AddToISelQueue(Tmp1);
1279 AddToISelQueue(Tmp2);
1280 AddToISelQueue(Tmp3);
1281 SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
1283 CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
1284 InFlag = SDOperand(CNode, 1);
1285 // Update the chain.
1286 ReplaceUses(N1.getValue(1), SDOperand(CNode, 0));
1290 SDOperand(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
1293 // Copy the low half of the result, if it is needed.
1294 if (!N.getValue(0).use_empty()) {
1295 SDOperand Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1296 LoReg, NVT, InFlag);
1297 InFlag = Result.getValue(2);
1298 ReplaceUses(N.getValue(0), Result);
1300 DOUT << std::string(Indent-2, ' ') << "=> ";
1301 DEBUG(Result.Val->dump(CurDAG));
1305 // Copy the high half of the result, if it is needed.
1306 if (!N.getValue(1).use_empty()) {
1308 if (HiReg == X86::AH && Subtarget->is64Bit()) {
1309 // Prevent use of AH in a REX instruction by referencing AX instead.
1310 // Shift it down 8 bits.
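// (AH cannot be encoded in an instruction carrying a REX prefix, which the
// extended x86-64 registers require, hence the AX copy plus shift.)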
1311 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1312 X86::AX, MVT::i16, InFlag);
1313 InFlag = Result.getValue(2);
1314 Result = SDOperand(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
1315 CurDAG->getTargetConstant(8, MVT::i8)), 0);
1316 // Then truncate it down to i8.
1317 SDOperand SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
1318 Result = SDOperand(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
1319 MVT::i8, Result, SRIdx), 0);
1321 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1322 HiReg, NVT, InFlag);
1323 InFlag = Result.getValue(2);
1325 ReplaceUses(N.getValue(1), Result);
1327 DOUT << std::string(Indent-2, ' ') << "=> ";
1328 DEBUG(Result.Val->dump(CurDAG));
1341 case ISD::UDIVREM: {
1342 SDOperand N0 = Node->getOperand(0);
1343 SDOperand N1 = Node->getOperand(1);
1345 bool isSigned = Opcode == ISD::SDIVREM;
1348 default: assert(0 && "Unsupported VT!");
1349 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
1350 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
1351 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
1352 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
1356 default: assert(0 && "Unsupported VT!");
1357 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
1358 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
1359 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
1360 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
1363 unsigned LoReg, HiReg;
1364 unsigned ClrOpcode, SExtOpcode;
1366 default: assert(0 && "Unsupported VT!");
1368 LoReg = X86::AL; HiReg = X86::AH;
1370 SExtOpcode = X86::CBW;
1373 LoReg = X86::AX; HiReg = X86::DX;
1374 ClrOpcode = X86::MOV16r0;
1375 SExtOpcode = X86::CWD;
1378 LoReg = X86::EAX; HiReg = X86::EDX;
1379 ClrOpcode = X86::MOV32r0;
1380 SExtOpcode = X86::CDQ;
1383 LoReg = X86::RAX; HiReg = X86::RDX;
1384 ClrOpcode = X86::MOV64r0;
1385 SExtOpcode = X86::CQO;
1389 SDOperand Tmp0, Tmp1, Tmp2, Tmp3;
1390 bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
1393 if (NVT == MVT::i8 && !isSigned) {
1394 // Special case for div8, just use a move with zero extension to AX to
1395 // clear the upper 8 bits (AH).
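// (The 8-bit DIV instruction divides AX by its operand, producing the
// quotient in AL and the remainder in AH, so zero-extending the dividend
// into AX is all the setup an unsigned 8-bit divide needs.)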
1396 SDOperand Tmp0, Tmp1, Tmp2, Tmp3, Move, Chain;
1397 if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3)) {
1398 SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N0.getOperand(0) };
1399 AddToISelQueue(N0.getOperand(0));
1400 AddToISelQueue(Tmp0);
1401 AddToISelQueue(Tmp1);
1402 AddToISelQueue(Tmp2);
1403 AddToISelQueue(Tmp3);
1405 SDOperand(CurDAG->getTargetNode(X86::MOVZX16rm8, MVT::i16, MVT::Other,
1407 Chain = Move.getValue(1);
1408 ReplaceUses(N0.getValue(1), Chain);
1412 SDOperand(CurDAG->getTargetNode(X86::MOVZX16rr8, MVT::i16, N0), 0);
1413 Chain = CurDAG->getEntryNode();
1415 Chain = CurDAG->getCopyToReg(Chain, X86::AX, Move, SDOperand());
1416 InFlag = Chain.getValue(1);
1420 CurDAG->getCopyToReg(CurDAG->getEntryNode(),
1421 LoReg, N0, SDOperand()).getValue(1);
1423 // Sign extend the low part into the high part.
1425 SDOperand(CurDAG->getTargetNode(SExtOpcode, MVT::Flag, InFlag), 0);
1427 // Zero out the high part, effectively zero extending the input.
1428 SDOperand ClrNode = SDOperand(CurDAG->getTargetNode(ClrOpcode, NVT), 0);
1429 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), HiReg,
1430 ClrNode, InFlag).getValue(1);
1435 AddToISelQueue(N1.getOperand(0));
1436 AddToISelQueue(Tmp0);
1437 AddToISelQueue(Tmp1);
1438 AddToISelQueue(Tmp2);
1439 AddToISelQueue(Tmp3);
1440 SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
1442 CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
1443 InFlag = SDOperand(CNode, 1);
1444 // Update the chain.
1445 ReplaceUses(N1.getValue(1), SDOperand(CNode, 0));
1449 SDOperand(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
1452 // Copy the division (low) result, if it is needed.
1453 if (!N.getValue(0).use_empty()) {
1454 SDOperand Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1455 LoReg, NVT, InFlag);
1456 InFlag = Result.getValue(2);
1457 ReplaceUses(N.getValue(0), Result);
1459 DOUT << std::string(Indent-2, ' ') << "=> ";
1460 DEBUG(Result.Val->dump(CurDAG));
1464 // Copy the remainder (high) result, if it is needed.
1465 if (!N.getValue(1).use_empty()) {
1467 if (HiReg == X86::AH && Subtarget->is64Bit()) {
1468 // Prevent use of AH in a REX instruction by referencing AX instead.
1469 // Shift it down 8 bits.
1470 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1471 X86::AX, MVT::i16, InFlag);
1472 InFlag = Result.getValue(2);
1473 Result = SDOperand(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
1474 CurDAG->getTargetConstant(8, MVT::i8)), 0);
1475 // Then truncate it down to i8.
1476 SDOperand SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
1477 Result = SDOperand(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
1478 MVT::i8, Result, SRIdx), 0);
1480 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1481 HiReg, NVT, InFlag);
1482 InFlag = Result.getValue(2);
1484 ReplaceUses(N.getValue(1), Result);
1486 DOUT << std::string(Indent-2, ' ') << "=> ";
1487 DEBUG(Result.Val->dump(CurDAG));
1499 case ISD::ANY_EXTEND: {
1500 SDOperand N0 = Node->getOperand(0);
1502 if (NVT == MVT::i64 || NVT == MVT::i32 || NVT == MVT::i16) {
1504 switch(N0.getValueType()) {
1506 SRIdx = CurDAG->getTargetConstant(3, MVT::i32); // SubRegSet 3
1509 SRIdx = CurDAG->getTargetConstant(2, MVT::i32); // SubRegSet 2
1512 if (Subtarget->is64Bit())
1513 SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
1515 default: assert(0 && "Unknown any_extend!");
1518 SDNode *ResNode = CurDAG->getTargetNode(X86::INSERT_SUBREG,
1522 DOUT << std::string(Indent-2, ' ') << "=> ";
1523 DEBUG(ResNode->dump(CurDAG));
1528 } // Otherwise let generated ISel handle it.
1533 case ISD::SIGN_EXTEND_INREG: {
1534 SDOperand N0 = Node->getOperand(0);
1537 MVT::ValueType SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
1538 SDOperand TruncOp = SDOperand(getTruncate(N0, SVT), 0);
1542 if (SVT == MVT::i8) Opc = X86::MOVSX16rr8;
1543 else assert(0 && "Unknown sign_extend_inreg!");
1547 case MVT::i8: Opc = X86::MOVSX32rr8; break;
1548 case MVT::i16: Opc = X86::MOVSX32rr16; break;
1549 default: assert(0 && "Unknown sign_extend_inreg!");
1554 case MVT::i8: Opc = X86::MOVSX64rr8; break;
1555 case MVT::i16: Opc = X86::MOVSX64rr16; break;
1556 case MVT::i32: Opc = X86::MOVSX64rr32; break;
1557 default: assert(0 && "Unknown sign_extend_inreg!");
1560 default: assert(0 && "Unknown sign_extend_inreg!");
1563 SDNode *ResNode = CurDAG->getTargetNode(Opc, NVT, TruncOp);
1566 DOUT << std::string(Indent-2, ' ') << "=> ";
1567 DEBUG(TruncOp.Val->dump(CurDAG));
1569 DOUT << std::string(Indent-2, ' ') << "=> ";
1570 DEBUG(ResNode->dump(CurDAG));
1578 case ISD::TRUNCATE: {
1579 SDOperand Input = Node->getOperand(0);
1580 AddToISelQueue(Node->getOperand(0));
1581 SDNode *ResNode = getTruncate(Input, NVT);
1584 DOUT << std::string(Indent-2, ' ') << "=> ";
1585 DEBUG(ResNode->dump(CurDAG));
1594 SDNode *ResNode = SelectCode(N);
1597 DOUT << std::string(Indent-2, ' ') << "=> ";
1598 if (ResNode == NULL || ResNode == N.Val)
1599 DEBUG(N.Val->dump(CurDAG));
1601 DEBUG(ResNode->dump(CurDAG));
1609 bool X86DAGToDAGISel::
1610 SelectInlineAsmMemoryOperand(const SDOperand &Op, char ConstraintCode,
1611 std::vector<SDOperand> &OutOps, SelectionDAG &DAG){
1612 SDOperand Op0, Op1, Op2, Op3;
1613 switch (ConstraintCode) {
1614 case 'o': // offsetable ??
1615 case 'v': // not offsetable ??
1616 default: return true;
1618 if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3))
1623 OutOps.push_back(Op0);
1624 OutOps.push_back(Op1);
1625 OutOps.push_back(Op2);
1626 OutOps.push_back(Op3);
1627 AddToISelQueue(Op0);
1628 AddToISelQueue(Op1);
1629 AddToISelQueue(Op2);
1630 AddToISelQueue(Op3);
1634 /// createX86ISelDag - This pass converts a legalized DAG into an
1635 /// X86-specific DAG, ready for instruction scheduling.
1637 FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, bool Fast) {
1638 return new X86DAGToDAGISel(TM, Fast);