1 //===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to an X86 dag.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "x86-isel"
17 #include "X86InstrBuilder.h"
18 #include "X86ISelLowering.h"
19 #include "X86MachineFunctionInfo.h"
20 #include "X86RegisterInfo.h"
21 #include "X86Subtarget.h"
22 #include "X86TargetMachine.h"
23 #include "llvm/GlobalValue.h"
24 #include "llvm/Instructions.h"
25 #include "llvm/Intrinsics.h"
26 #include "llvm/Support/CFG.h"
27 #include "llvm/Type.h"
28 #include "llvm/CodeGen/MachineConstantPool.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/CodeGen/SelectionDAGISel.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Compiler.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/MathExtras.h"
39 #include "llvm/ADT/Statistic.h"
44 STATISTIC(NumFPKill , "Number of FP_REG_KILL instructions added");
45 STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
49 AlwaysFoldAndInTest("always-fold-and-in-test",
50 cl::desc("Always fold and operation in test"),
51 cl::init(true), cl::Hidden);
54 //===----------------------------------------------------------------------===//
55 // Pattern Matcher Implementation
56 //===----------------------------------------------------------------------===//
59 /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
/// SDOperand's instead of register numbers for the leaves of the matched
/// tree.
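///
/// The matched form corresponds to the general x86 memory operand
///   Base + Scale*Index + Disp, where the displacement slot may instead hold a
///   GlobalValue, ConstantPool entry, ExternalSymbol, or JumpTable index.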
62 struct X86ISelAddressMode {
68 struct { // This is really a union, discriminated by BaseType!
73 bool isRIPRel; // RIP as base?
81 unsigned Align; // CP alignment.
84 : BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
85 GV(0), CP(0), ES(0), JT(-1), Align(0) {
91 //===--------------------------------------------------------------------===//
92 /// ISel - X86 specific code to select X86 machine instructions for
93 /// SelectionDAG operations.
95 class VISIBILITY_HIDDEN X86DAGToDAGISel : public SelectionDAGISel {
96 /// ContainsFPCode - Every instruction we select that uses or defines a FP
  /// register should set this to true.
  bool ContainsFPCode;
  /// FastISel - Enable fast(er) instruction selection.
  bool FastISel;
104 /// TM - Keep a reference to X86TargetMachine.
106 X86TargetMachine &TM;
108 /// X86Lowering - This object fully describes how to lower LLVM code to an
109 /// X86-specific SelectionDAG.
110 X86TargetLowering X86Lowering;
112 /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
113 /// make the right decision when generating code for different targets.
114 const X86Subtarget *Subtarget;
  /// GlobalBaseReg - keeps track of the virtual register mapped onto the global
  /// base register.
118 unsigned GlobalBaseReg;
121 X86DAGToDAGISel(X86TargetMachine &tm, bool fast)
122 : SelectionDAGISel(X86Lowering),
123 ContainsFPCode(false), FastISel(fast), TM(tm),
124 X86Lowering(*TM.getTargetLowering()),
125 Subtarget(&TM.getSubtarget<X86Subtarget>()) {}
127 virtual bool runOnFunction(Function &Fn) {
    // Make sure we re-emit a set of the global base reg if necessary
    GlobalBaseReg = 0;
    return SelectionDAGISel::runOnFunction(Fn);
133 virtual const char *getPassName() const {
134 return "X86 DAG->DAG Instruction Selection";
137 /// InstructionSelectBasicBlock - This callback is invoked by
138 /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
139 virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
141 virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);
143 virtual bool CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const;
145 // Include the pieces autogenerated from the target description.
146 #include "X86GenDAGISel.inc"
149 SDNode *Select(SDOperand N);
151 bool MatchAddress(SDOperand N, X86ISelAddressMode &AM,
152 bool isRoot = true, unsigned Depth = 0);
153 bool MatchAddressBase(SDOperand N, X86ISelAddressMode &AM,
154 bool isRoot, unsigned Depth);
155 bool SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base,
156 SDOperand &Scale, SDOperand &Index, SDOperand &Disp);
157 bool SelectLEAAddr(SDOperand Op, SDOperand N, SDOperand &Base,
158 SDOperand &Scale, SDOperand &Index, SDOperand &Disp);
159 bool SelectScalarSSELoad(SDOperand Op, SDOperand Pred,
160 SDOperand N, SDOperand &Base, SDOperand &Scale,
161 SDOperand &Index, SDOperand &Disp,
162 SDOperand &InChain, SDOperand &OutChain);
163 bool TryFoldLoad(SDOperand P, SDOperand N,
164 SDOperand &Base, SDOperand &Scale,
165 SDOperand &Index, SDOperand &Disp);
166 void PreprocessForRMW(SelectionDAG &DAG);
167 void PreprocessForFPConvert(SelectionDAG &DAG);
169 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
170 /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDOperand &Op,
                                            char ConstraintCode,
                                            std::vector<SDOperand> &OutOps,
                                            SelectionDAG &DAG);
176 void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
178 inline void getAddressOperands(X86ISelAddressMode &AM, SDOperand &Base,
179 SDOperand &Scale, SDOperand &Index,
181 Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
182 CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
184 Scale = getI8Imm(AM.Scale);
    // These are 32-bit even in 64-bit mode since RIP relative offsets
    // are 32-bit.
    if (AM.GV)
      Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp);
    else if (AM.CP)
      Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Align, AM.Disp);
    else if (AM.ES)
      Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32);
    else if (AM.JT != -1)
      Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32);
    else
      Disp = getI32Imm(AM.Disp);
  /// getI8Imm - Return a target constant with the specified value, of type
  /// i8.
202 inline SDOperand getI8Imm(unsigned Imm) {
203 return CurDAG->getTargetConstant(Imm, MVT::i8);
  /// getI16Imm - Return a target constant with the specified value, of type
  /// i16.
208 inline SDOperand getI16Imm(unsigned Imm) {
209 return CurDAG->getTargetConstant(Imm, MVT::i16);
  /// getI32Imm - Return a target constant with the specified value, of type
  /// i32.
214 inline SDOperand getI32Imm(unsigned Imm) {
215 return CurDAG->getTargetConstant(Imm, MVT::i32);
218 /// getGlobalBaseReg - insert code into the entry mbb to materialize the PIC
219 /// base register. Return the virtual register that holds this value.
220 SDNode *getGlobalBaseReg();
  /// getTruncate - Return an SDNode that implements a subreg based truncate
  /// of the specified operand to the specified value type.
224 SDNode *getTruncate(SDOperand N0, MVT::ValueType VT);
static SDNode *findFlagUse(SDNode *N) {
  unsigned FlagResNo = N->getNumValues()-1;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
      SDOperand Op = User->getOperand(i);
      if (Op.Val == N && Op.ResNo == FlagResNo)
        return User;
    }
  }
  return NULL;
}
245 static void findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
246 SDNode *Root, SDNode *Skip, bool &found,
247 std::set<SDNode *> &Visited) {
249 Use->getNodeId() > Def->getNodeId() ||
250 !Visited.insert(Use).second)
253 for (unsigned i = 0, e = Use->getNumOperands(); !found && i != e; ++i) {
254 SDNode *N = Use->getOperand(i).Val;
259 continue; // Immediate use is ok.
261 assert(Use->getOpcode() == ISD::STORE ||
262 Use->getOpcode() == X86ISD::CMP);
268 findNonImmUse(N, Def, ImmedUse, Root, Skip, found, Visited);
/// isNonImmUse - Start searching from Root up the DAG to check if Def can
273 /// be reached. Return true if that's the case. However, ignore direct uses
274 /// by ImmedUse (which would be U in the example illustrated in
275 /// CanBeFoldedBy) and by Root (which can happen in the store case).
276 /// FIXME: to be really generic, we should allow direct use by any node
/// that is being folded. But realistically, since we only fold loads which
278 /// have one non-chain use, we only need to watch out for load/op/store
279 /// and load/op/cmp case where the root (store / cmp) may reach the load via
280 /// its chain operand.
static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
                               SDNode *Skip = NULL) {
  std::set<SDNode *> Visited;
  bool found = false;
  findNonImmUse(Root, Def, ImmedUse, Root, Skip, found, Visited);
  return found;
}
290 bool X86DAGToDAGISel::CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const {
291 if (FastISel) return false;
293 // If U use can somehow reach N through another path then U can't fold N or
294 // it will create a cycle. e.g. In the following diagram, U can reach N
  // through X. If N is folded into U, then X is both a predecessor and
306 if (isNonImmUse(Root, N, U))
309 // If U produces a flag, then it gets (even more) interesting. Since it
310 // would have been "glued" together with its flag use, we need to check if
323 // If FU (flag use) indirectly reach N (the load), and U fold N (call it
324 // NU), then TF is a predecessor of FU and a successor of NU. But since
325 // NU and FU are flagged together, this effectively creates a cycle.
326 bool HasFlagUse = false;
327 MVT::ValueType VT = Root->getValueType(Root->getNumValues()-1);
  while (VT == MVT::Flag && !Root->use_empty()) {
329 SDNode *FU = findFlagUse(Root);
336 VT = Root->getValueType(Root->getNumValues()-1);
340 return !isNonImmUse(Root, N, Root, U);
344 /// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain operand
345 /// and move load below the TokenFactor. Replace store's chain operand with
346 /// load's chain result.
347 static void MoveBelowTokenFactor(SelectionDAG &DAG, SDOperand Load,
348 SDOperand Store, SDOperand TF) {
349 std::vector<SDOperand> Ops;
350 for (unsigned i = 0, e = TF.Val->getNumOperands(); i != e; ++i)
351 if (Load.Val == TF.Val->getOperand(i).Val)
      Ops.push_back(Load.Val->getOperand(0));
    else
      Ops.push_back(TF.Val->getOperand(i));
355 DAG.UpdateNodeOperands(TF, &Ops[0], Ops.size());
356 DAG.UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
357 DAG.UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
358 Store.getOperand(2), Store.getOperand(3));
361 /// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
362 /// This is only run if not in -fast mode (aka -O0).
363 /// This allows the instruction selector to pick more read-modify-write
364 /// instructions. This is a common case:
374 /// [TokenFactor] [Op]
/// The fact that the store's chain operand != load's chain will prevent the
/// (store (op (load))) instruction from being selected. We can transform it to
/// a DAG in which the load's chain operand is the TokenFactor and the store's
/// chain operand is the load's chain result, exposing the load/op/store pattern.
401 void X86DAGToDAGISel::PreprocessForRMW(SelectionDAG &DAG) {
402 for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
403 E = DAG.allnodes_end(); I != E; ++I) {
    if (!ISD::isNON_TRUNCStore(I))
      continue;
    SDOperand Chain = I->getOperand(0);
    if (Chain.Val->getOpcode() != ISD::TokenFactor)
      continue;
410 SDOperand N1 = I->getOperand(1);
411 SDOperand N2 = I->getOperand(2);
412 if (MVT::isFloatingPoint(N1.getValueType()) ||
413 MVT::isVector(N1.getValueType()) ||
419 unsigned Opcode = N1.Val->getOpcode();
428 SDOperand N10 = N1.getOperand(0);
429 SDOperand N11 = N1.getOperand(1);
      if (ISD::isNON_EXTLoad(N10.Val))
        RModW = true;
      else if (ISD::isNON_EXTLoad(N11.Val)) {
        RModW = true;
        std::swap(N10, N11);
      }
      RModW = RModW && N10.Val->isOperand(Chain.Val) && N10.hasOneUse() &&
              (N10.getOperand(1) == N2) &&
              (N10.Val->getValueType(0) == N1.getValueType());
453 SDOperand N10 = N1.getOperand(0);
454 if (ISD::isNON_EXTLoad(N10.Val))
455 RModW = N10.Val->isOperand(Chain.Val) && N10.hasOneUse() &&
456 (N10.getOperand(1) == N2) &&
457 (N10.Val->getValueType(0) == N1.getValueType());
465 MoveBelowTokenFactor(DAG, Load, SDOperand(I, 0), Chain);
/// PreprocessForFPConvert - Walk over the dag, lowering fpround and fpextend
/// nodes that target the FP stack into a store and load through the stack. This is a
474 /// gross hack. We would like to simply mark these as being illegal, but when
475 /// we do that, legalize produces these when it expands calls, then expands
476 /// these in the same legalize pass. We would like dag combine to be able to
477 /// hack on these between the call expansion and the node legalization. As such
/// this pass basically does "really late" legalization of these inline with the
/// X86 isel pass.
480 void X86DAGToDAGISel::PreprocessForFPConvert(SelectionDAG &DAG) {
481 for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
482 E = DAG.allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Advance the iterator before we transform the DAG below,
                      // to avoid invalidating it.
484 if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
487 // If the source and destination are SSE registers, then this is a legal
488 // conversion that should not be lowered.
489 MVT::ValueType SrcVT = N->getOperand(0).getValueType();
490 MVT::ValueType DstVT = N->getValueType(0);
491 bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
492 bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
493 if (SrcIsSSE && DstIsSSE)
496 // If this is an FPStack extension (but not a truncation), it is a noop.
497 if (!SrcIsSSE && !DstIsSSE && N->getOpcode() == ISD::FP_EXTEND)
500 // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
501 // FPStack has extload and truncstore. SSE can fold direct loads into other
502 // operations. Based on this, decide what we want to do.
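    // For example, an f64 -> f32 FP_ROUND whose input lives on the FP stack and
    // whose result is wanted in SSE becomes an f32 truncating store followed by
    // a plain f32 load below: the x87 side does the rounding through memory and
    // the SSE side can fold the resulting load.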
503 MVT::ValueType MemVT;
504 if (N->getOpcode() == ISD::FP_ROUND)
505 MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
507 MemVT = SrcIsSSE ? SrcVT : DstVT;
509 SDOperand MemTmp = DAG.CreateStackTemporary(MemVT);
511 // FIXME: optimize the case where the src/dest is a load or store?
512 SDOperand Store = DAG.getTruncStore(DAG.getEntryNode(), N->getOperand(0),
513 MemTmp, NULL, 0, MemVT);
    SDOperand Result = DAG.getExtLoad(ISD::EXTLOAD, DstVT, Store, MemTmp,
                                      NULL, 0, MemVT);
517 // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
519 // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Result);
    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    DAG.DeleteNode(N);
531 /// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
532 /// when it has created a SelectionDAG for us to codegen.
533 void X86DAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
535 MachineFunction::iterator FirstMBB = BB;
538 PreprocessForRMW(DAG);
540 // FIXME: This should only happen when not -fast.
541 PreprocessForFPConvert(DAG);
543 // Codegen the basic block.
545 DOUT << "===== Instruction selection begins:\n";
548 DAG.setRoot(SelectRoot(DAG.getRoot()));
550 DOUT << "===== Instruction selection ends:\n";
553 DAG.RemoveDeadNodes();
555 // Emit machine code to BB.
556 ScheduleAndEmitDAG(DAG);
558 // If we are emitting FP stack code, scan the basic block to determine if this
559 // block defines any FP values. If so, put an FP_REG_KILL instruction before
560 // the terminator of the block.
562 // Note that FP stack instructions are used in all modes for long double,
563 // so we always need to do this check.
564 // Also note that it's possible for an FP stack register to be live across
565 // an instruction that produces multiple basic blocks (SSE CMOV) so we
566 // must check all the generated basic blocks.
568 // Scan all of the machine instructions in these MBBs, checking for FP
569 // stores. (RFP32 and RFP64 will not exist in SSE mode, but RFP80 might.)
570 MachineFunction::iterator MBBI = FirstMBB;
572 bool ContainsFPCode = false;
573 for (MachineBasicBlock::iterator I = MBBI->begin(), E = MBBI->end();
574 !ContainsFPCode && I != E; ++I) {
575 if (I->getNumOperands() != 0 && I->getOperand(0).isRegister()) {
576 const TargetRegisterClass *clas;
577 for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
578 if (I->getOperand(op).isRegister() && I->getOperand(op).isDef() &&
579 TargetRegisterInfo::isVirtualRegister(I->getOperand(op).getReg()) &&
            ((clas = RegInfo->getRegClass(I->getOperand(op).getReg())) ==
581 X86::RFP32RegisterClass ||
582 clas == X86::RFP64RegisterClass ||
583 clas == X86::RFP80RegisterClass)) {
584 ContainsFPCode = true;
590 // Check PHI nodes in successor blocks. These PHI's will be lowered to have
591 // a copy of the input value in this block. In SSE mode, we only care about
593 if (!ContainsFPCode) {
594 // Final check, check LLVM BB's that are successors to the LLVM BB
595 // corresponding to BB for FP PHI nodes.
    const BasicBlock *LLVMBB = BB->getBasicBlock();
    const PHINode *PN;
    for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
599 !ContainsFPCode && SI != E; ++SI) {
600 for (BasicBlock::const_iterator II = SI->begin();
601 (PN = dyn_cast<PHINode>(II)); ++II) {
602 if (PN->getType()==Type::X86_FP80Ty ||
603 (!Subtarget->hasSSE1() && PN->getType()->isFloatingPoint()) ||
604 (!Subtarget->hasSSE2() && PN->getType()==Type::DoubleTy)) {
605 ContainsFPCode = true;
611 // Finally, if we found any FP code, emit the FP_REG_KILL instruction.
612 if (ContainsFPCode) {
613 BuildMI(*MBBI, MBBI->getFirstTerminator(),
614 TM.getInstrInfo()->get(X86::FP_REG_KILL));
617 } while (&*(MBBI++) != BB);
620 /// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
621 /// the main function.
622 void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
623 MachineFrameInfo *MFI) {
624 const TargetInstrInfo *TII = TM.getInstrInfo();
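  // Cygwin/MinGW expect main() to call __main so that static constructors are
  // run before user code; emit that call for those targets.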
625 if (Subtarget->isTargetCygMing())
626 BuildMI(BB, TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
629 void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
630 // If this is main, emit special code for main.
631 MachineBasicBlock *BB = MF.begin();
632 if (Fn.hasExternalLinkage() && Fn.getName() == "main")
633 EmitSpecialCodeForMain(BB, MF.getFrameInfo());
636 /// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
639 bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
640 bool isRoot, unsigned Depth) {
643 return MatchAddressBase(N, AM, isRoot, Depth);
645 // RIP relative addressing: %rip + 32-bit displacement!
647 if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
648 int64_t Val = cast<ConstantSDNode>(N)->getSignExtended();
649 if (isInt32(AM.Disp + Val)) {
657 int id = N.Val->getNodeId();
658 bool AlreadySelected = isSelected(id); // Already selected, not yet replaced.
660 switch (N.getOpcode()) {
662 case ISD::Constant: {
663 int64_t Val = cast<ConstantSDNode>(N)->getSignExtended();
664 if (isInt32(AM.Disp + Val)) {
671 case X86ISD::Wrapper: {
672 bool is64Bit = Subtarget->is64Bit();
673 // Under X86-64 non-small code model, GV (and friends) are 64-bits.
674 // Also, base and index reg must be 0 in order to use rip as base.
675 if (is64Bit && (TM.getCodeModel() != CodeModel::Small ||
676 AM.Base.Reg.Val || AM.IndexReg.Val))
678 if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
    // If the value is available in a register but both the base and index
    // components have already been picked, we can't put it into the addressing
    // mode. Duplicate the GlobalAddress or ConstantPool as a displacement instead.
683 if (!AlreadySelected || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
684 SDOperand N0 = N.getOperand(0);
685 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
686 GlobalValue *GV = G->getGlobal();
688 AM.Disp += G->getOffset();
689 AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
690 Subtarget->isPICStyleRIPRel();
692 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
693 AM.CP = CP->getConstVal();
694 AM.Align = CP->getAlignment();
695 AM.Disp += CP->getOffset();
696 AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
697 Subtarget->isPICStyleRIPRel();
699 } else if (ExternalSymbolSDNode *S =dyn_cast<ExternalSymbolSDNode>(N0)) {
700 AM.ES = S->getSymbol();
701 AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
702 Subtarget->isPICStyleRIPRel();
704 } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
705 AM.JT = J->getIndex();
706 AM.isRIPRel = TM.getRelocationModel() != Reloc::Static &&
707 Subtarget->isPICStyleRIPRel();
714 case ISD::FrameIndex:
715 if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base.Reg.Val == 0) {
716 AM.BaseType = X86ISelAddressMode::FrameIndexBase;
717 AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
723 if (AlreadySelected || AM.IndexReg.Val != 0 || AM.Scale != 1 || AM.isRIPRel)
726 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
727 unsigned Val = CN->getValue();
728 if (Val == 1 || Val == 2 || Val == 3) {
730 SDOperand ShVal = N.Val->getOperand(0);
732 // Okay, we know that we have a scale by now. However, if the scaled
733 // value is an add of something and a constant, we can fold the
734 // constant into the disp field here.
735 if (ShVal.Val->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
736 isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
737 AM.IndexReg = ShVal.Val->getOperand(0);
738 ConstantSDNode *AddVal =
739 cast<ConstantSDNode>(ShVal.Val->getOperand(1));
740 uint64_t Disp = AM.Disp + (AddVal->getValue() << Val);
755 // A mul_lohi where we need the low part can be folded as a plain multiply.
756 if (N.ResNo != 0) break;
759 // X*[3,5,9] -> X+X*[2,4,8]
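    // e.g. X*9 is matched as Base = X, Index = X, Scale = 8, which an LEA can
    // compute as X + 8*X.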
760 if (!AlreadySelected &&
761 AM.BaseType == X86ISelAddressMode::RegBase &&
762 AM.Base.Reg.Val == 0 &&
763 AM.IndexReg.Val == 0 &&
765 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
766 if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
767 AM.Scale = unsigned(CN->getValue())-1;
769 SDOperand MulVal = N.Val->getOperand(0);
772 // Okay, we know that we have a scale by now. However, if the scaled
773 // value is an add of something and a constant, we can fold the
774 // constant into the disp field here.
775 if (MulVal.Val->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
776 isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
777 Reg = MulVal.Val->getOperand(0);
778 ConstantSDNode *AddVal =
779 cast<ConstantSDNode>(MulVal.Val->getOperand(1));
780 uint64_t Disp = AM.Disp + AddVal->getValue() * CN->getValue();
784 Reg = N.Val->getOperand(0);
786 Reg = N.Val->getOperand(0);
789 AM.IndexReg = AM.Base.Reg = Reg;
796 if (!AlreadySelected) {
797 X86ISelAddressMode Backup = AM;
798 if (!MatchAddress(N.Val->getOperand(0), AM, false, Depth+1) &&
          !MatchAddress(N.Val->getOperand(1), AM, false, Depth+1))
        return false;
      AM = Backup;
      if (!MatchAddress(N.Val->getOperand(1), AM, false, Depth+1) &&
803 !MatchAddress(N.Val->getOperand(0), AM, false, Depth+1))
810 // Handle "X | C" as "X + C" iff X is known to have C bits clear.
811 if (AlreadySelected) break;
813 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
814 X86ISelAddressMode Backup = AM;
815 // Start with the LHS as an addr mode.
816 if (!MatchAddress(N.getOperand(0), AM, false) &&
          // Address could not have picked a GV address for the displacement.
          AM.GV == NULL &&
          // On x86-64, the resultant disp must fit in 32-bits.
820 isInt32(AM.Disp + CN->getSignExtended()) &&
821 // Check to see if the LHS & C is zero.
822 CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getValue())) {
823 AM.Disp += CN->getValue();
831 // Handle "(x << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
832 // allows us to fold the shift into this addressing mode.
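    // For instance, (and (shl X, 3), 0xF8) is re-folded as (shl (and X, 0x1F), 3),
    // which fits the addressing mode as Index = (and X, 0x1F) with Scale = 8.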
833 if (AlreadySelected) break;
834 SDOperand Shift = N.getOperand(0);
835 if (Shift.getOpcode() != ISD::SHL) break;
837 // Scale must not be used already.
838 if (AM.IndexReg.Val != 0 || AM.Scale != 1) break;
840 // Not when RIP is used as the base.
841 if (AM.isRIPRel) break;
843 ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
844 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
845 if (!C1 || !C2) break;
847 // Not likely to be profitable if either the AND or SHIFT node has more
848 // than one use (unless all uses are for address computation). Besides,
849 // isel mechanism requires their node ids to be reused.
850 if (!N.hasOneUse() || !Shift.hasOneUse())
853 // Verify that the shift amount is something we can fold.
854 unsigned ShiftCst = C1->getValue();
855 if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
858 // Get the new AND mask, this folds to a constant.
859 SDOperand NewANDMask = CurDAG->getNode(ISD::SRL, N.getValueType(),
860 SDOperand(C2, 0), SDOperand(C1, 0));
861 SDOperand NewAND = CurDAG->getNode(ISD::AND, N.getValueType(),
862 Shift.getOperand(0), NewANDMask);
863 NewANDMask.Val->setNodeId(Shift.Val->getNodeId());
864 NewAND.Val->setNodeId(N.Val->getNodeId());
866 AM.Scale = 1 << ShiftCst;
867 AM.IndexReg = NewAND;
872 return MatchAddressBase(N, AM, isRoot, Depth);
875 /// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
876 /// specified addressing mode without any further recursion.
877 bool X86DAGToDAGISel::MatchAddressBase(SDOperand N, X86ISelAddressMode &AM,
878 bool isRoot, unsigned Depth) {
879 // Is the base register already occupied?
880 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) {
881 // If so, check to see if the scale index register is set.
882 if (AM.IndexReg.Val == 0 && !AM.isRIPRel) {
888 // Otherwise, we cannot select it.
892 // Default, generate it as a register.
893 AM.BaseType = X86ISelAddressMode::RegBase;
/// SelectAddr - returns true if it is able to pattern match an addressing mode.
899 /// It returns the operands which make up the maximal addressing mode it can
900 /// match by reference.
901 bool X86DAGToDAGISel::SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base,
902 SDOperand &Scale, SDOperand &Index,
904 X86ISelAddressMode AM;
905 if (MatchAddress(N, AM))
908 MVT::ValueType VT = N.getValueType();
909 if (AM.BaseType == X86ISelAddressMode::RegBase) {
910 if (!AM.Base.Reg.Val)
911 AM.Base.Reg = CurDAG->getRegister(0, VT);
914 if (!AM.IndexReg.Val)
915 AM.IndexReg = CurDAG->getRegister(0, VT);
917 getAddressOperands(AM, Base, Scale, Index, Disp);
/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
923 static inline bool isZeroNode(SDOperand Elt) {
924 return ((isa<ConstantSDNode>(Elt) &&
925 cast<ConstantSDNode>(Elt)->getValue() == 0) ||
926 (isa<ConstantFPSDNode>(Elt) &&
927 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
931 /// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
932 /// match a load whose top elements are either undef or zeros. The load flavor
933 /// is derived from the type of N, which is either v4f32 or v2f64.
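/// A typical case is (scalar_to_vector (load addr)), where folding lets the
/// using SSE instruction read the scalar directly from memory (e.g. as a
/// movss/movsd-style memory operand).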
934 bool X86DAGToDAGISel::SelectScalarSSELoad(SDOperand Op, SDOperand Pred,
935 SDOperand N, SDOperand &Base,
936 SDOperand &Scale, SDOperand &Index,
937 SDOperand &Disp, SDOperand &InChain,
938 SDOperand &OutChain) {
939 if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
940 InChain = N.getOperand(0).getValue(1);
941 if (ISD::isNON_EXTLoad(InChain.Val) &&
942 InChain.getValue(0).hasOneUse() &&
944 CanBeFoldedBy(N.Val, Pred.Val, Op.Val)) {
945 LoadSDNode *LD = cast<LoadSDNode>(InChain);
946 if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
948 OutChain = LD->getChain();
953 // Also handle the case where we explicitly require zeros in the top
954 // elements. This is a vector shuffle from the zero vector.
955 if (N.getOpcode() == ISD::VECTOR_SHUFFLE && N.Val->hasOneUse() &&
956 // Check to see if the top elements are all zeros (or bitcast of zeros).
957 ISD::isBuildVectorAllZeros(N.getOperand(0).Val) &&
958 N.getOperand(1).getOpcode() == ISD::SCALAR_TO_VECTOR &&
959 N.getOperand(1).Val->hasOneUse() &&
960 ISD::isNON_EXTLoad(N.getOperand(1).getOperand(0).Val) &&
961 N.getOperand(1).getOperand(0).hasOneUse()) {
962 // Check to see if the shuffle mask is 4/L/L/L or 2/L, where L is something
964 unsigned VecWidth=MVT::getVectorNumElements(N.getOperand(0).getValueType());
965 SDOperand ShufMask = N.getOperand(2);
966 assert(ShufMask.getOpcode() == ISD::BUILD_VECTOR && "Invalid shuf mask!");
967 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(ShufMask.getOperand(0))) {
968 if (C->getValue() == VecWidth) {
969 for (unsigned i = 1; i != VecWidth; ++i) {
970 if (ShufMask.getOperand(i).getOpcode() == ISD::UNDEF) {
973 ConstantSDNode *C = cast<ConstantSDNode>(ShufMask.getOperand(i));
974 if (C->getValue() >= VecWidth) return false;
979 // Okay, this is a zero extending load. Fold it.
980 LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(1).getOperand(0));
981 if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
983 OutChain = LD->getChain();
984 InChain = SDOperand(LD, 1);
992 /// SelectLEAAddr - it calls SelectAddr and determines if the maximal addressing
993 /// mode it matches can be cost effectively emitted as an LEA instruction.
994 bool X86DAGToDAGISel::SelectLEAAddr(SDOperand Op, SDOperand N,
995 SDOperand &Base, SDOperand &Scale,
996 SDOperand &Index, SDOperand &Disp) {
997 X86ISelAddressMode AM;
998 if (MatchAddress(N, AM))
1001 MVT::ValueType VT = N.getValueType();
1002 unsigned Complexity = 0;
1003 if (AM.BaseType == X86ISelAddressMode::RegBase)
1004 if (AM.Base.Reg.Val)
1007 AM.Base.Reg = CurDAG->getRegister(0, VT);
1008 else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
1011 if (AM.IndexReg.Val)
1014 AM.IndexReg = CurDAG->getRegister(0, VT);
  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
1021 // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
1023 // optimal (especially for code size consideration). LEA is nice because of
1024 // its three-address nature. Tweak the cost function again when we can run
1025 // convertToThreeAddress() at register allocation time.
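  // Complexity roughly counts how many components of the addressing mode are
  // actually used; only modes rich enough (Complexity > 2 below) are worth
  // emitting as an LEA.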
1026 if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
1027 // For X86-64, we should always use lea to materialize RIP relative
1029 if (Subtarget->is64Bit())
1035 if (AM.Disp && (AM.Base.Reg.Val || AM.IndexReg.Val))
1038 if (Complexity > 2) {
1039 getAddressOperands(AM, Base, Scale, Index, Disp);
1045 bool X86DAGToDAGISel::TryFoldLoad(SDOperand P, SDOperand N,
1046 SDOperand &Base, SDOperand &Scale,
1047 SDOperand &Index, SDOperand &Disp) {
  if (ISD::isNON_EXTLoad(N.Val) &&
      N.hasOneUse() &&
      CanBeFoldedBy(N.Val, P.Val, P.Val))
1051 return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp);
1055 /// getGlobalBaseReg - Output the instructions required to put the
1056 /// base address to use for accessing globals into a register.
1058 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
1059 assert(!Subtarget->is64Bit() && "X86-64 PIC uses RIP relative addressing");
1060 if (!GlobalBaseReg) {
1061 // Insert the set of GlobalBaseReg into the first MBB of the function
1062 MachineFunction *MF = BB->getParent();
1063 MachineBasicBlock &FirstMBB = MF->front();
1064 MachineBasicBlock::iterator MBBI = FirstMBB.begin();
1065 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1066 unsigned PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
1068 const TargetInstrInfo *TII = TM.getInstrInfo();
1069 // Operand of MovePCtoStack is completely ignored by asm printer. It's
1070 // only used in JIT code emission as displacement to pc.
1071 BuildMI(FirstMBB, MBBI, TII->get(X86::MOVPC32r), PC).addImm(0);
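    // After this, PC holds the address labeled by MOVPC32r's expansion (roughly
    // a call to the next instruction followed by a pop), which is the PIC base.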
    // If we're using vanilla 'GOT' PIC style, we should use relative addressing
    // not to pc, but to the _GLOBAL_OFFSET_TABLE_ external symbol.
1075 if (TM.getRelocationModel() == Reloc::PIC_ &&
1076 Subtarget->isPICStyleGOT()) {
1077 GlobalBaseReg = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
1078 BuildMI(FirstMBB, MBBI, TII->get(X86::ADD32ri), GlobalBaseReg)
1079 .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_");
1085 return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).Val;
1088 static SDNode *FindCallStartFromCall(SDNode *Node) {
1089 if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
1090 assert(Node->getOperand(0).getValueType() == MVT::Other &&
1091 "Node doesn't have a token chain argument!");
1092 return FindCallStartFromCall(Node->getOperand(0).Val);
1095 SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT::ValueType VT) {
1099 SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
1100 // Ensure that the source register has an 8-bit subreg on 32-bit targets
1101 if (!Subtarget->is64Bit()) {
1104 switch (N0.getValueType()) {
1105 default: assert(0 && "Unknown truncate!");
1107 Opc = X86::MOV16to16_;
1111 Opc = X86::MOV32to32_;
1115 N0 = SDOperand(CurDAG->getTargetNode(Opc, VT, MVT::Flag, N0), 0);
1116 return CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
1117 VT, N0, SRIdx, N0.getValue(1));
1121 SRIdx = CurDAG->getTargetConstant(2, MVT::i32); // SubRegSet 2
1124 SRIdx = CurDAG->getTargetConstant(3, MVT::i32); // SubRegSet 3
1126 default: assert(0 && "Unknown truncate!"); break;
1128 return CurDAG->getTargetNode(X86::EXTRACT_SUBREG, VT, N0, SRIdx);
1132 SDNode *X86DAGToDAGISel::Select(SDOperand N) {
1133 SDNode *Node = N.Val;
1134 MVT::ValueType NVT = Node->getValueType(0);
1136 unsigned Opcode = Node->getOpcode();
1139 DOUT << std::string(Indent, ' ') << "Selecting: ";
1140 DEBUG(Node->dump(CurDAG));
1145 if (Opcode >= ISD::BUILTIN_OP_END && Opcode < X86ISD::FIRST_NUMBER) {
1147 DOUT << std::string(Indent-2, ' ') << "== ";
1148 DEBUG(Node->dump(CurDAG));
1152 return NULL; // Already selected.
1157 case X86ISD::GlobalBaseReg:
1158 return getGlobalBaseReg();
1160 case X86ISD::FP_GET_RESULT2: {
1161 SDOperand Chain = N.getOperand(0);
1162 SDOperand InFlag = N.getOperand(1);
1163 AddToISelQueue(Chain);
1164 AddToISelQueue(InFlag);
1165 std::vector<MVT::ValueType> Tys;
1166 Tys.push_back(MVT::f80);
1167 Tys.push_back(MVT::f80);
1168 Tys.push_back(MVT::Other);
1169 Tys.push_back(MVT::Flag);
1170 SDOperand Ops[] = { Chain, InFlag };
1171 SDNode *ResNode = CurDAG->getTargetNode(X86::FpGETRESULT80x2, Tys,
1173 Chain = SDOperand(ResNode, 2);
1174 InFlag = SDOperand(ResNode, 3);
1175 ReplaceUses(SDOperand(N.Val, 2), Chain);
1176 ReplaceUses(SDOperand(N.Val, 3), InFlag);
    // Turn ADD X, c to MOV32ri X+c. This cannot be done with tblgen'd
    // code and is matched first so as to prevent it from being turned into an LEA.
1184 // In 64-bit small code size mode, use LEA to take advantage of
1185 // RIP-relative addressing.
    if (TM.getCodeModel() != CodeModel::Small)
      break;
    MVT::ValueType PtrVT = TLI.getPointerTy();
1189 SDOperand N0 = N.getOperand(0);
1190 SDOperand N1 = N.getOperand(1);
1191 if (N.Val->getValueType(0) == PtrVT &&
1192 N0.getOpcode() == X86ISD::Wrapper &&
1193 N1.getOpcode() == ISD::Constant) {
1194 unsigned Offset = (unsigned)cast<ConstantSDNode>(N1)->getValue();
1196 // TODO: handle ExternalSymbolSDNode.
1197 if (GlobalAddressSDNode *G =
1198 dyn_cast<GlobalAddressSDNode>(N0.getOperand(0))) {
1199 C = CurDAG->getTargetGlobalAddress(G->getGlobal(), PtrVT,
1200 G->getOffset() + Offset);
1201 } else if (ConstantPoolSDNode *CP =
1202 dyn_cast<ConstantPoolSDNode>(N0.getOperand(0))) {
1203 C = CurDAG->getTargetConstantPool(CP->getConstVal(), PtrVT,
1205 CP->getOffset()+Offset);
1209 if (Subtarget->is64Bit()) {
1210 SDOperand Ops[] = { CurDAG->getRegister(0, PtrVT), getI8Imm(1),
1211 CurDAG->getRegister(0, PtrVT), C };
1212 return CurDAG->SelectNodeTo(N.Val, X86::LEA64r, MVT::i64, Ops, 4);
1214 return CurDAG->SelectNodeTo(N.Val, X86::MOV32ri, PtrVT, C);
1218 // Other cases are handled by auto-generated code.
1222 case ISD::SMUL_LOHI:
1223 case ISD::UMUL_LOHI: {
1224 SDOperand N0 = Node->getOperand(0);
1225 SDOperand N1 = Node->getOperand(1);
1227 // There are several forms of IMUL that just return the low part and
1228 // don't have fixed-register operands. If we don't need the high part,
1229 // use these instead. They can be selected with the generated ISel code.
1230 if (NVT != MVT::i8 &&
1231 N.getValue(1).use_empty()) {
1232 N = CurDAG->getNode(ISD::MUL, NVT, N0, N1);
1236 bool isSigned = Opcode == ISD::SMUL_LOHI;
1239 default: assert(0 && "Unsupported VT!");
1240 case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
1241 case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
1242 case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
1243 case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
1247 default: assert(0 && "Unsupported VT!");
1248 case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
1249 case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
1250 case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
1251 case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
1254 unsigned LoReg, HiReg;
1256 default: assert(0 && "Unsupported VT!");
1257 case MVT::i8: LoReg = X86::AL; HiReg = X86::AH; break;
1258 case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; break;
1259 case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
1260 case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
1263 SDOperand Tmp0, Tmp1, Tmp2, Tmp3;
1264 bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
    // Multiply is commutative.
1267 foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3);
1273 SDOperand InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), LoReg,
1274 N0, SDOperand()).getValue(1);
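    // The widening multiply implicitly reads LoReg and defines both LoReg and
    // HiReg, so the first operand was just copied into LoReg; the low and high
    // halves of the result are copied back out of LoReg/HiReg further down.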
1277 AddToISelQueue(N1.getOperand(0));
1278 AddToISelQueue(Tmp0);
1279 AddToISelQueue(Tmp1);
1280 AddToISelQueue(Tmp2);
1281 AddToISelQueue(Tmp3);
1282 SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
1284 CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
1285 InFlag = SDOperand(CNode, 1);
1286 // Update the chain.
1287 ReplaceUses(N1.getValue(1), SDOperand(CNode, 0));
1291 SDOperand(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
1294 // Copy the low half of the result, if it is needed.
1295 if (!N.getValue(0).use_empty()) {
1296 SDOperand Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1297 LoReg, NVT, InFlag);
1298 InFlag = Result.getValue(2);
1299 ReplaceUses(N.getValue(0), Result);
1301 DOUT << std::string(Indent-2, ' ') << "=> ";
1302 DEBUG(Result.Val->dump(CurDAG));
1306 // Copy the high half of the result, if it is needed.
1307 if (!N.getValue(1).use_empty()) {
1309 if (HiReg == X86::AH && Subtarget->is64Bit()) {
1310 // Prevent use of AH in a REX instruction by referencing AX instead.
1311 // Shift it down 8 bits.
1312 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1313 X86::AX, MVT::i16, InFlag);
1314 InFlag = Result.getValue(2);
1315 Result = SDOperand(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
1316 CurDAG->getTargetConstant(8, MVT::i8)), 0);
1317 // Then truncate it down to i8.
1318 SDOperand SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
1319 Result = SDOperand(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
1320 MVT::i8, Result, SRIdx), 0);
1322 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1323 HiReg, NVT, InFlag);
1324 InFlag = Result.getValue(2);
1326 ReplaceUses(N.getValue(1), Result);
1328 DOUT << std::string(Indent-2, ' ') << "=> ";
1329 DEBUG(Result.Val->dump(CurDAG));
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
1343 SDOperand N0 = Node->getOperand(0);
1344 SDOperand N1 = Node->getOperand(1);
1346 bool isSigned = Opcode == ISD::SDIVREM;
1349 default: assert(0 && "Unsupported VT!");
1350 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
1351 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
1352 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
1353 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
1357 default: assert(0 && "Unsupported VT!");
1358 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
1359 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
1360 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
1361 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
1364 unsigned LoReg, HiReg;
1365 unsigned ClrOpcode, SExtOpcode;
1367 default: assert(0 && "Unsupported VT!");
1369 LoReg = X86::AL; HiReg = X86::AH;
1371 SExtOpcode = X86::CBW;
1374 LoReg = X86::AX; HiReg = X86::DX;
1375 ClrOpcode = X86::MOV16r0;
1376 SExtOpcode = X86::CWD;
1379 LoReg = X86::EAX; HiReg = X86::EDX;
1380 ClrOpcode = X86::MOV32r0;
1381 SExtOpcode = X86::CDQ;
1384 LoReg = X86::RAX; HiReg = X86::RDX;
1385 ClrOpcode = X86::MOV64r0;
1386 SExtOpcode = X86::CQO;
1390 SDOperand Tmp0, Tmp1, Tmp2, Tmp3;
1391 bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
1394 if (NVT == MVT::i8 && !isSigned) {
1395 // Special case for div8, just use a move with zero extension to AX to
1396 // clear the upper 8 bits (AH).
1397 SDOperand Tmp0, Tmp1, Tmp2, Tmp3, Move, Chain;
1398 if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3)) {
1399 SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N0.getOperand(0) };
1400 AddToISelQueue(N0.getOperand(0));
1401 AddToISelQueue(Tmp0);
1402 AddToISelQueue(Tmp1);
1403 AddToISelQueue(Tmp2);
1404 AddToISelQueue(Tmp3);
1406 SDOperand(CurDAG->getTargetNode(X86::MOVZX16rm8, MVT::i16, MVT::Other,
1408 Chain = Move.getValue(1);
1409 ReplaceUses(N0.getValue(1), Chain);
1413 SDOperand(CurDAG->getTargetNode(X86::MOVZX16rr8, MVT::i16, N0), 0);
1414 Chain = CurDAG->getEntryNode();
1416 Chain = CurDAG->getCopyToReg(Chain, X86::AX, Move, SDOperand());
1417 InFlag = Chain.getValue(1);
1421 CurDAG->getCopyToReg(CurDAG->getEntryNode(),
1422 LoReg, N0, SDOperand()).getValue(1);
1424 // Sign extend the low part into the high part.
1426 SDOperand(CurDAG->getTargetNode(SExtOpcode, MVT::Flag, InFlag), 0);
1428 // Zero out the high part, effectively zero extending the input.
1429 SDOperand ClrNode = SDOperand(CurDAG->getTargetNode(ClrOpcode, NVT), 0);
1430 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), HiReg,
1431 ClrNode, InFlag).getValue(1);
1436 AddToISelQueue(N1.getOperand(0));
1437 AddToISelQueue(Tmp0);
1438 AddToISelQueue(Tmp1);
1439 AddToISelQueue(Tmp2);
1440 AddToISelQueue(Tmp3);
1441 SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
1443 CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
1444 InFlag = SDOperand(CNode, 1);
1445 // Update the chain.
1446 ReplaceUses(N1.getValue(1), SDOperand(CNode, 0));
1450 SDOperand(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
1453 // Copy the division (low) result, if it is needed.
1454 if (!N.getValue(0).use_empty()) {
1455 SDOperand Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1456 LoReg, NVT, InFlag);
1457 InFlag = Result.getValue(2);
1458 ReplaceUses(N.getValue(0), Result);
1460 DOUT << std::string(Indent-2, ' ') << "=> ";
1461 DEBUG(Result.Val->dump(CurDAG));
1465 // Copy the remainder (high) result, if it is needed.
1466 if (!N.getValue(1).use_empty()) {
1468 if (HiReg == X86::AH && Subtarget->is64Bit()) {
1469 // Prevent use of AH in a REX instruction by referencing AX instead.
1470 // Shift it down 8 bits.
1471 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1472 X86::AX, MVT::i16, InFlag);
1473 InFlag = Result.getValue(2);
1474 Result = SDOperand(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
1475 CurDAG->getTargetConstant(8, MVT::i8)), 0);
1476 // Then truncate it down to i8.
1477 SDOperand SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
1478 Result = SDOperand(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
1479 MVT::i8, Result, SRIdx), 0);
1481 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1482 HiReg, NVT, InFlag);
1483 InFlag = Result.getValue(2);
1485 ReplaceUses(N.getValue(1), Result);
1487 DOUT << std::string(Indent-2, ' ') << "=> ";
1488 DEBUG(Result.Val->dump(CurDAG));
1500 case ISD::ANY_EXTEND: {
1501 SDOperand N0 = Node->getOperand(0);
1503 if (NVT == MVT::i64 || NVT == MVT::i32 || NVT == MVT::i16) {
1505 switch(N0.getValueType()) {
1507 SRIdx = CurDAG->getTargetConstant(3, MVT::i32); // SubRegSet 3
1510 SRIdx = CurDAG->getTargetConstant(2, MVT::i32); // SubRegSet 2
1513 if (Subtarget->is64Bit())
1514 SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
1516 default: assert(0 && "Unknown any_extend!");
1519 SDNode *ResNode = CurDAG->getTargetNode(X86::INSERT_SUBREG,
1523 DOUT << std::string(Indent-2, ' ') << "=> ";
1524 DEBUG(ResNode->dump(CurDAG));
1529 } // Otherwise let generated ISel handle it.
1534 case ISD::SIGN_EXTEND_INREG: {
1535 SDOperand N0 = Node->getOperand(0);
1538 MVT::ValueType SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
1539 SDOperand TruncOp = SDOperand(getTruncate(N0, SVT), 0);
1543 if (SVT == MVT::i8) Opc = X86::MOVSX16rr8;
1544 else assert(0 && "Unknown sign_extend_inreg!");
1548 case MVT::i8: Opc = X86::MOVSX32rr8; break;
1549 case MVT::i16: Opc = X86::MOVSX32rr16; break;
1550 default: assert(0 && "Unknown sign_extend_inreg!");
1555 case MVT::i8: Opc = X86::MOVSX64rr8; break;
1556 case MVT::i16: Opc = X86::MOVSX64rr16; break;
1557 case MVT::i32: Opc = X86::MOVSX64rr32; break;
1558 default: assert(0 && "Unknown sign_extend_inreg!");
1561 default: assert(0 && "Unknown sign_extend_inreg!");
1564 SDNode *ResNode = CurDAG->getTargetNode(Opc, NVT, TruncOp);
1567 DOUT << std::string(Indent-2, ' ') << "=> ";
1568 DEBUG(TruncOp.Val->dump(CurDAG));
1570 DOUT << std::string(Indent-2, ' ') << "=> ";
1571 DEBUG(ResNode->dump(CurDAG));
1579 case ISD::TRUNCATE: {
1580 SDOperand Input = Node->getOperand(0);
1581 AddToISelQueue(Node->getOperand(0));
1582 SDNode *ResNode = getTruncate(Input, NVT);
1585 DOUT << std::string(Indent-2, ' ') << "=> ";
1586 DEBUG(ResNode->dump(CurDAG));
1595 SDNode *ResNode = SelectCode(N);
1598 DOUT << std::string(Indent-2, ' ') << "=> ";
1599 if (ResNode == NULL || ResNode == N.Val)
1600 DEBUG(N.Val->dump(CurDAG));
1602 DEBUG(ResNode->dump(CurDAG));
1610 bool X86DAGToDAGISel::
1611 SelectInlineAsmMemoryOperand(const SDOperand &Op, char ConstraintCode,
1612 std::vector<SDOperand> &OutOps, SelectionDAG &DAG){
1613 SDOperand Op0, Op1, Op2, Op3;
1614 switch (ConstraintCode) {
1615 case 'o': // offsetable ??
1616 case 'v': // not offsetable ??
1617 default: return true;
1619 if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3))
1624 OutOps.push_back(Op0);
1625 OutOps.push_back(Op1);
1626 OutOps.push_back(Op2);
1627 OutOps.push_back(Op3);
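  // Op0..Op3 are the base, scale, index and displacement of the matched x86
  // memory operand; queue them for selection as well.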
1628 AddToISelQueue(Op0);
1629 AddToISelQueue(Op1);
1630 AddToISelQueue(Op2);
1631 AddToISelQueue(Op3);
/// createX86ISelDag - This pass converts a legalized DAG into an
1636 /// X86-specific DAG, ready for instruction scheduling.
1638 FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, bool Fast) {
  return new X86DAGToDAGISel(TM, Fast);
}