1 //===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a DAG pattern matching instruction selector for X86,
11 // converting from a legalized dag to an X86 dag.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "x86-isel"
17 #include "X86InstrBuilder.h"
18 #include "X86ISelLowering.h"
19 #include "X86MachineFunctionInfo.h"
20 #include "X86RegisterInfo.h"
21 #include "X86Subtarget.h"
22 #include "X86TargetMachine.h"
23 #include "llvm/GlobalValue.h"
24 #include "llvm/Instructions.h"
25 #include "llvm/Intrinsics.h"
26 #include "llvm/Support/CFG.h"
27 #include "llvm/Type.h"
28 #include "llvm/CodeGen/MachineConstantPool.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/CodeGen/SelectionDAGISel.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/Compiler.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/MathExtras.h"
39 #include "llvm/Support/Streams.h"
40 #include "llvm/ADT/SmallPtrSet.h"
41 #include "llvm/ADT/Statistic.h"
44 STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
46 //===----------------------------------------------------------------------===//
47 // Pattern Matcher Implementation
48 //===----------------------------------------------------------------------===//
51 /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
52 /// SDValues instead of register numbers for the leaves of the matched
53 /// tree.
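/// (Editorial note, not from the original source.) The x86 memory operand
/// being modeled here has the general form
///     Base + Scale*Index + Disp
/// so, for example, "movl 12(%ebx,%ecx,4), %eax" loads from %ebx + 4*%ecx + 12.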
54 struct X86ISelAddressMode {
60 struct { // This is really a union, discriminated by BaseType!
65 bool isRIPRel; // RIP as base?
73 unsigned Align; // CP alignment.
76 : BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
77 GV(0), CP(0), ES(0), JT(-1), Align(0) {
80 cerr << "X86ISelAddressMode " << this << "\n";
82 if (Base.Reg.getNode() != 0) Base.Reg.getNode()->dump();
84 cerr << " Base.FrameIndex " << Base.FrameIndex << "\n";
85 cerr << "isRIPRel " << isRIPRel << " Scale" << Scale << "\n";
87 if (IndexReg.getNode() != 0) IndexReg.getNode()->dump();
89 cerr << " Disp " << Disp << "\n";
90 cerr << "GV "; if (GV) GV->dump();
92 cerr << " CP "; if (CP) CP->dump();
95 cerr << "ES "; if (ES) cerr << ES; else cerr << "nul";
96 cerr << " JT" << JT << " Align" << Align << "\n";
102 //===--------------------------------------------------------------------===//
103 /// ISel - X86 specific code to select X86 machine instructions for
104 /// SelectionDAG operations.
106 class VISIBILITY_HIDDEN X86DAGToDAGISel : public SelectionDAGISel {
107 /// TM - Keep a reference to X86TargetMachine.
109 X86TargetMachine &TM;
111 /// X86Lowering - This object fully describes how to lower LLVM code to an
112 /// X86-specific SelectionDAG.
113 X86TargetLowering &X86Lowering;
115 /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
116 /// make the right decision when generating code for different targets.
117 const X86Subtarget *Subtarget;
119 /// CurBB - Current BB being isel'd.
121 MachineBasicBlock *CurBB;
123 /// OptForSize - If true, selector should try to optimize for code size
124 /// instead of performance.
128 X86DAGToDAGISel(X86TargetMachine &tm, bool fast)
129 : SelectionDAGISel(tm, fast),
130 TM(tm), X86Lowering(*TM.getTargetLowering()),
131 Subtarget(&TM.getSubtarget<X86Subtarget>()),
134 virtual const char *getPassName() const {
135 return "X86 DAG->DAG Instruction Selection";
138 /// InstructionSelect - This callback is invoked by
139 /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
140 virtual void InstructionSelect();
142 virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);
145 bool IsLegalAndProfitableToFold(SDNode *N, SDNode *U, SDNode *Root) const;
147 // Include the pieces autogenerated from the target description.
148 #include "X86GenDAGISel.inc"
151 SDNode *Select(SDValue N);
152 SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
154 bool MatchAddress(SDValue N, X86ISelAddressMode &AM,
155 bool isRoot = true, unsigned Depth = 0);
156 bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM,
157 bool isRoot, unsigned Depth);
158 bool SelectAddr(SDValue Op, SDValue N, SDValue &Base,
159 SDValue &Scale, SDValue &Index, SDValue &Disp);
160 bool SelectLEAAddr(SDValue Op, SDValue N, SDValue &Base,
161 SDValue &Scale, SDValue &Index, SDValue &Disp);
162 bool SelectScalarSSELoad(SDValue Op, SDValue Pred,
163 SDValue N, SDValue &Base, SDValue &Scale,
164 SDValue &Index, SDValue &Disp,
165 SDValue &InChain, SDValue &OutChain);
166 bool TryFoldLoad(SDValue P, SDValue N,
167 SDValue &Base, SDValue &Scale,
168 SDValue &Index, SDValue &Disp);
169 void PreprocessForRMW();
170 void PreprocessForFPConvert();
172 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
173 /// inline asm expressions.
174 virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
176 std::vector<SDValue> &OutOps);
178 void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
180 inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
181 SDValue &Scale, SDValue &Index,
183 Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
184 CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
186 Scale = getI8Imm(AM.Scale);
188 // These are 32-bit even in 64-bit mode since RIP relative offset
189 // is 32-bit.
190 if (AM.GV)
191 Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp);
192 else if (AM.CP)
193 Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
194 AM.Align, AM.Disp);
195 else if (AM.ES)
196 Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32);
197 else if (AM.JT != -1)
198 Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32);
199 else
200 Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);
205 /// getI8Imm - Return a target constant with the specified value, of type
206 /// i8.
205 inline SDValue getI8Imm(unsigned Imm) {
206 return CurDAG->getTargetConstant(Imm, MVT::i8);
209 /// getI16Imm - Return a target constant with the specified value, of type
210 /// i16.
211 inline SDValue getI16Imm(unsigned Imm) {
212 return CurDAG->getTargetConstant(Imm, MVT::i16);
215 /// getI32Imm - Return a target constant with the specified value, of type
216 /// i32.
217 inline SDValue getI32Imm(unsigned Imm) {
218 return CurDAG->getTargetConstant(Imm, MVT::i32);
221 /// getGlobalBaseReg - Return an SDNode that returns the value of
222 /// the global base register. Output instructions required to
223 /// initialize the global base register, if necessary.
225 SDNode *getGlobalBaseReg();
227 /// getTruncateTo8Bit - return an SDNode that implements a subreg based
228 /// truncate of the specified operand to i8. This can be done with tablegen,
229 /// except that this code uses MVT::Flag in a tricky way that happens to
230 /// improve scheduling in some cases.
231 SDNode *getTruncateTo8Bit(SDValue N0);
239 /// findFlagUse - Return the use of the MVT::Flag value produced by the
240 /// specified SDNode.
242 static SDNode *findFlagUse(SDNode *N) {
243 unsigned FlagResNo = N->getNumValues()-1;
244 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
245 SDNode *User = *I;
246 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
247 SDValue Op = User->getOperand(i);
248 if (Op.getNode() == N && Op.getResNo() == FlagResNo)
255 /// findNonImmUse - Return true by reference in "found" if "Use" is a
256 /// non-immediate use of "Def". This function recursively traverses
257 /// up the operand chain, ignoring certain nodes.
258 static void findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
259 SDNode *Root, bool &found,
260 SmallPtrSet<SDNode*, 16> &Visited) {
262 Use->getNodeId() < Def->getNodeId() ||
263 !Visited.insert(Use))
266 for (unsigned i = 0, e = Use->getNumOperands(); !found && i != e; ++i) {
267 SDNode *N = Use->getOperand(i).getNode();
269 if (Use == ImmedUse || Use == Root)
270 continue; // We are not looking for immediate use.
276 // Traverse up the operand chain.
277 findNonImmUse(N, Def, ImmedUse, Root, found, Visited);
281 /// isNonImmUse - Start searching from Root up the DAG to check if Def can
282 /// be reached. Return true if that's the case. However, ignore direct uses
283 /// by ImmedUse (which would be U in the example illustrated in
284 /// IsLegalAndProfitableToFold) and by Root (which can happen in the store
285 /// case).
286 /// FIXME: to be really generic, we should allow direct use by any node
287 /// that is being folded. But realistically, since we only fold loads which
288 /// have one non-chain use, we only need to watch out for the load/op/store
289 /// and load/op/cmp cases where the root (store / cmp) may reach the load via
290 /// its chain operand.
291 static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse) {
292 SmallPtrSet<SDNode*, 16> Visited;
293 bool found = false;
294 findNonImmUse(Root, Def, ImmedUse, Root, found, Visited);
295 return found;
296 }
299 bool X86DAGToDAGISel::IsLegalAndProfitableToFold(SDNode *N, SDNode *U,
300 SDNode *Root) const {
301 if (Fast) return false;
304 switch (U->getOpcode()) {
312 // If the other operand is an 8-bit immediate we should fold the immediate
313 // instead. This reduces code size.
314 // e.g.
315 // movl 4(%esp), %eax
316 // addl $10, %eax
317 // is preferred to
318 // movl $10, %eax
319 // addl 4(%esp), %eax
320 // The former is 2 bytes shorter. In the case where the increment is 1,
321 // the saving can be 4 bytes (by using incl %eax).
322 ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(U->getOperand(1));
324 if (U->getValueType(0) == MVT::i64) {
325 if ((int32_t)Imm->getZExtValue() == (int64_t)Imm->getZExtValue())
328 if ((int8_t)Imm->getZExtValue() == (int64_t)Imm->getZExtValue())
335 // If Root can somehow reach N through a path that doesn't contain
336 // U, then folding N would create a cycle. e.g. In the following
337 // diagram, Root can reach N through X. If N is folded into Root, then
338 // X is both a predecessor and a successor of U.
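// An editorial sketch of that situation (the full diagram from the original
// source is abbreviated in this copy):
//
//            [N]*
//           ^   ^
//           |   |
//         [U]*  [X]
//           ^   ^
//            \ /
//          [Root]*
//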
349 // * indicates nodes to be folded together.
351 // If Root produces a flag, then it gets (even more) interesting. Since it
352 // will be "glued" together with its flag use in the scheduler, we need to
353 // check if it might reach N.
372 // If FU (flag use) indirectly reaches N (the load), and Root folds N
373 // (call it Fold), then X is a predecessor of FU and a successor of
374 // Fold. But since Fold and FU are flagged together, this will create
375 // a cycle in the scheduling graph.
377 MVT VT = Root->getValueType(Root->getNumValues()-1);
378 while (VT == MVT::Flag) {
379 SDNode *FU = findFlagUse(Root);
380 if (FU == NULL)
381 break;
382 Root = FU;
383 VT = Root->getValueType(Root->getNumValues()-1);
386 return !isNonImmUse(Root, N, U);
389 /// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain operand
390 /// and move load below the TokenFactor. Replace store's chain operand with
391 /// load's chain result.
392 static void MoveBelowTokenFactor(SelectionDAG *CurDAG, SDValue Load,
393 SDValue Store, SDValue TF) {
394 SmallVector<SDValue, 4> Ops;
395 for (unsigned i = 0, e = TF.getNode()->getNumOperands(); i != e; ++i)
396 if (Load.getNode() == TF.getOperand(i).getNode())
397 Ops.push_back(Load.getOperand(0));
398 else
399 Ops.push_back(TF.getOperand(i));
400 CurDAG->UpdateNodeOperands(TF, &Ops[0], Ops.size());
401 CurDAG->UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
402 CurDAG->UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
403 Store.getOperand(2), Store.getOperand(3));
406 /// isRMWLoad - Return true if N is a load that's part of an RMW sub-DAG.
408 static bool isRMWLoad(SDValue N, SDValue Chain, SDValue Address,
409 SDValue &Load) {
410 if (N.getOpcode() == ISD::BIT_CONVERT)
411 N = N.getOperand(0);
413 LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
414 if (!LD || LD->isVolatile())
416 if (LD->getAddressingMode() != ISD::UNINDEXED)
419 ISD::LoadExtType ExtType = LD->getExtensionType();
420 if (ExtType != ISD::NON_EXTLOAD && ExtType != ISD::EXTLOAD)
421 return false;
423 if (N.hasOneUse() &&
424 N.getOperand(1) == Address &&
425 N.getNode()->isOperandOf(Chain.getNode())) {
426 Load = N;
427 return true;
428 }
429 return false;
430 }
432 /// MoveBelowCallSeqStart - Replace CALLSEQ_START operand with load's chain
433 /// operand and move load below the call's chain operand.
434 static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
435 SDValue Call, SDValue Chain) {
436 SmallVector<SDValue, 8> Ops;
437 for (unsigned i = 0, e = Chain.getNode()->getNumOperands(); i != e; ++i)
438 if (Load.getNode() == Chain.getOperand(i).getNode())
439 Ops.push_back(Load.getOperand(0));
440 else
441 Ops.push_back(Chain.getOperand(i));
442 CurDAG->UpdateNodeOperands(Chain, &Ops[0], Ops.size());
443 CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
444 Load.getOperand(1), Load.getOperand(2));
445 Ops.clear();
446 Ops.push_back(SDValue(Load.getNode(), 1));
447 for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
448 Ops.push_back(Call.getOperand(i));
449 CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size());
452 /// isCalleeLoad - Return true if the call address is a load and it can be
453 /// moved below CALLSEQ_START and the chains leading up to the call.
454 /// Return the CALLSEQ_START by reference as a second output.
455 static bool isCalleeLoad(SDValue Callee, SDValue &Chain) {
456 if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
458 LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
459 if (!LD ||
460 LD->isVolatile() ||
461 LD->getAddressingMode() != ISD::UNINDEXED ||
462 LD->getExtensionType() != ISD::NON_EXTLOAD)
463 return false;
465 // Now let's find the callseq_start.
466 while (Chain.getOpcode() != ISD::CALLSEQ_START) {
467 if (!Chain.hasOneUse())
468 return false;
469 Chain = Chain.getOperand(0);
470 }
471 return Chain.getOperand(0).getNode() == Callee.getNode();
475 /// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
476 /// This is only run if not in -fast mode (aka -O0).
477 /// This allows the instruction selector to pick more read-modify-write
478 /// instructions. This is a common case:
488 /// [TokenFactor] [Op]
495 /// The fact that the store's chain operand != the load's chain will prevent the
496 /// (store (op (load))) instruction from being selected. We can transform it to:
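/// (Editorial sketch; the original before/after diagrams are abbreviated in
/// this copy.) Before the transform, the load's chain result is only an
/// operand of a TokenFactor, and the store's chain operand is that
/// TokenFactor, so the store and load are not chained directly:
///
///     [Load] --chain--> [TokenFactor] --chain--> [Store]
///        \----value---> [Op] ----value------------->/
///
/// Afterwards, the load is re-chained below the TokenFactor and the store's
/// chain operand becomes the load's chain result, so (store (op (load)))
/// can be matched as a single read-modify-write instruction.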
515 void X86DAGToDAGISel::PreprocessForRMW() {
516 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
517 E = CurDAG->allnodes_end(); I != E; ++I) {
518 if (I->getOpcode() == X86ISD::CALL) {
519 /// Also try moving call address load from outside callseq_start to just
520 /// before the call to allow it to be folded.
538 SDValue Chain = I->getOperand(0);
539 SDValue Load = I->getOperand(1);
540 if (!isCalleeLoad(Load, Chain))
542 MoveBelowCallSeqStart(CurDAG, Load, SDValue(I, 0), Chain);
547 if (!ISD::isNON_TRUNCStore(I))
549 SDValue Chain = I->getOperand(0);
551 if (Chain.getNode()->getOpcode() != ISD::TokenFactor)
554 SDValue N1 = I->getOperand(1);
555 SDValue N2 = I->getOperand(2);
556 if ((N1.getValueType().isFloatingPoint() &&
557 !N1.getValueType().isVector()) ||
563 unsigned Opcode = N1.getNode()->getOpcode();
572 case ISD::VECTOR_SHUFFLE: {
573 SDValue N10 = N1.getOperand(0);
574 SDValue N11 = N1.getOperand(1);
575 RModW = isRMWLoad(N10, Chain, N2, Load);
577 RModW = isRMWLoad(N11, Chain, N2, Load);
590 SDValue N10 = N1.getOperand(0);
591 RModW = isRMWLoad(N10, Chain, N2, Load);
597 MoveBelowTokenFactor(CurDAG, Load, SDValue(I, 0), Chain);
604 /// PreprocessForFPConvert - Walk over the dag, lowering fpround and fpextend
605 /// nodes that target the FP stack into store/load pairs through the stack. This is a
606 /// gross hack. We would like to simply mark these as being illegal, but when
607 /// we do that, legalize produces these when it expands calls, then expands
608 /// these in the same legalize pass. We would like dag combine to be able to
609 /// hack on these between the call expansion and the node legalization. As such
610 /// this pass basically does "really late" legalization of these inline with the
611 /// other operations.
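/// Illustrative example (editorial note, not from the original comment): an
/// FP_EXTEND from an SSE f32 value to an x87 f64 result becomes a store of
/// the f32 to a fresh stack temporary followed by an f32->f64 extending load
/// from that slot, which the FP stack performs natively.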
612 void X86DAGToDAGISel::PreprocessForFPConvert() {
613 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
614 E = CurDAG->allnodes_end(); I != E; ) {
615 SDNode *N = I++; // Advance the iterator now to avoid invalidation issues.
616 if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
619 // If the source and destination are SSE registers, then this is a legal
620 // conversion that should not be lowered.
621 MVT SrcVT = N->getOperand(0).getValueType();
622 MVT DstVT = N->getValueType(0);
623 bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
624 bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
625 if (SrcIsSSE && DstIsSSE)
628 if (!SrcIsSSE && !DstIsSSE) {
629 // If this is an FPStack extension, it is a noop.
630 if (N->getOpcode() == ISD::FP_EXTEND)
632 // If this is a value-preserving FPStack truncation, it is a noop.
633 if (N->getConstantOperandVal(1))
637 // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
638 // FPStack has extload and truncstore. SSE can fold direct loads into other
639 // operations. Based on this, decide what we want to do.
640 MVT MemVT;
641 if (N->getOpcode() == ISD::FP_ROUND)
642 MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
644 MemVT = SrcIsSSE ? SrcVT : DstVT;
646 SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
648 // FIXME: optimize the case where the src/dest is a load or store?
649 SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(),
650 N->getOperand(0),
651 MemTmp, NULL, 0, MemVT);
652 SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, DstVT, Store, MemTmp,
653 NULL, 0, MemVT);
655 // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
656 // extload we created. This will cause general havoc on the dag because
657 // anything below the conversion could be folded into other existing nodes.
658 // To avoid invalidating 'I', back it up to the convert node.
660 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
662 // Now that we did that, the node is dead. Increment the iterator to the
663 // next node to process, then delete N.
665 CurDAG->DeleteNode(N);
669 /// InstructionSelect - This callback is invoked by SelectionDAGISel
670 /// when it has created a SelectionDAG for us to codegen.
671 void X86DAGToDAGISel::InstructionSelect() {
672 CurBB = BB; // BB can change as result of isel.
673 const Function *F = CurDAG->getMachineFunction().getFunction();
674 OptForSize = F->hasFnAttr(Attribute::OptimizeForSize);
680 // FIXME: This should only happen when not -fast.
681 PreprocessForFPConvert();
683 // Codegen the basic block.
685 DOUT << "===== Instruction selection begins:\n";
690 DOUT << "===== Instruction selection ends:\n";
693 CurDAG->RemoveDeadNodes();
696 /// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
697 /// the main function.
698 void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
699 MachineFrameInfo *MFI) {
700 const TargetInstrInfo *TII = TM.getInstrInfo();
701 if (Subtarget->isTargetCygMing())
702 BuildMI(BB, TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
705 void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
706 // If this is main, emit special code for main.
707 MachineBasicBlock *BB = MF.begin();
708 if (Fn.hasExternalLinkage() && Fn.getName() == "main")
709 EmitSpecialCodeForMain(BB, MF.getFrameInfo());
712 /// MatchAddress - Add the specified node to the specified addressing mode,
713 /// returning true if it cannot be done. This just pattern matches for the
714 /// addressing mode.
715 bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
716 bool isRoot, unsigned Depth) {
717 bool is64Bit = Subtarget->is64Bit();
718 DOUT << "MatchAddress: "; DEBUG(AM.dump());
721 return MatchAddressBase(N, AM, isRoot, Depth);
723 // RIP relative addressing: %rip + 32-bit displacement!
725 if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
726 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
727 if (!is64Bit || isInt32(AM.Disp + Val)) {
735 switch (N.getOpcode()) {
737 case ISD::Constant: {
738 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
739 if (!is64Bit || isInt32(AM.Disp + Val)) {
746 case X86ISD::Wrapper: {
747 DOUT << "Wrapper: 64bit " << is64Bit;
748 DOUT << " AM "; DEBUG(AM.dump()); DOUT << "\n";
749 // Under X86-64 non-small code model, GV (and friends) are 64-bits.
750 // Also, base and index reg must be 0 in order to use rip as base.
751 if (is64Bit && (TM.getCodeModel() != CodeModel::Small ||
752 AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
754 if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
756 // If the value is available in a register but both base and index components
757 // have already been picked, we can't fit the register holding it into the
758 // addressing mode. Duplicate the GlobalAddress or ConstantPool as the displacement.
760 SDValue N0 = N.getOperand(0);
761 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
762 uint64_t Offset = G->getOffset();
763 if (!is64Bit || isInt32(AM.Disp + Offset)) {
764 GlobalValue *GV = G->getGlobal();
767 AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
770 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
771 uint64_t Offset = CP->getOffset();
772 if (!is64Bit || isInt32(AM.Disp + Offset)) {
773 AM.CP = CP->getConstVal();
774 AM.Align = CP->getAlignment();
776 AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
779 } else if (ExternalSymbolSDNode *S =dyn_cast<ExternalSymbolSDNode>(N0)) {
780 AM.ES = S->getSymbol();
781 AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
783 } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
784 AM.JT = J->getIndex();
785 AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
792 case ISD::FrameIndex:
793 if (AM.BaseType == X86ISelAddressMode::RegBase
794 && AM.Base.Reg.getNode() == 0) {
795 AM.BaseType = X86ISelAddressMode::FrameIndexBase;
796 AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
802 if (AM.IndexReg.getNode() != 0 || AM.Scale != 1 || AM.isRIPRel)
806 *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
807 unsigned Val = CN->getZExtValue();
808 if (Val == 1 || Val == 2 || Val == 3) {
810 SDValue ShVal = N.getNode()->getOperand(0);
812 // Okay, we know that we have a scale by now. However, if the scaled
813 // value is an add of something and a constant, we can fold the
814 // constant into the disp field here.
815 if (ShVal.getNode()->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
816 isa<ConstantSDNode>(ShVal.getNode()->getOperand(1))) {
817 AM.IndexReg = ShVal.getNode()->getOperand(0);
818 ConstantSDNode *AddVal =
819 cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
820 uint64_t Disp = AM.Disp + (AddVal->getSExtValue() << Val);
821 if (!is64Bit || isInt32(Disp))
835 // A mul_lohi where we need the low part can be folded as a plain multiply.
836 if (N.getResNo() != 0) break;
839 // X*[3,5,9] -> X+X*[2,4,8]
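// Illustrative example (editorial note): for X*9 this sets Scale = 8 with X
// as both the base and the index, so the whole multiply can become a single
// "leal (%eax,%eax,8), %ecx", i.e. %eax + 8*%eax = 9*%eax.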
840 if (AM.BaseType == X86ISelAddressMode::RegBase &&
841 AM.Base.Reg.getNode() == 0 &&
842 AM.IndexReg.getNode() == 0 &&
845 *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
846 if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
847 CN->getZExtValue() == 9) {
848 AM.Scale = unsigned(CN->getZExtValue())-1;
850 SDValue MulVal = N.getNode()->getOperand(0);
853 // Okay, we know that we have a scale by now. However, if the scaled
854 // value is an add of something and a constant, we can fold the
855 // constant into the disp field here.
856 if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
857 isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
858 Reg = MulVal.getNode()->getOperand(0);
859 ConstantSDNode *AddVal =
860 cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
861 uint64_t Disp = AM.Disp + AddVal->getSExtValue() *
862 CN->getZExtValue();
863 if (!is64Bit || isInt32(Disp))
866 Reg = N.getNode()->getOperand(0);
868 Reg = N.getNode()->getOperand(0);
871 AM.IndexReg = AM.Base.Reg = Reg;
878 X86ISelAddressMode Backup = AM;
879 if (!MatchAddress(N.getNode()->getOperand(0), AM, false, Depth+1) &&
880 !MatchAddress(N.getNode()->getOperand(1), AM, false, Depth+1))
883 if (!MatchAddress(N.getNode()->getOperand(1), AM, false, Depth+1) &&
884 !MatchAddress(N.getNode()->getOperand(0), AM, false, Depth+1))
891 // Handle "X | C" as "X + C" iff X is known to have C bits clear.
892 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
893 X86ISelAddressMode Backup = AM;
894 uint64_t Offset = CN->getSExtValue();
895 // Start with the LHS as an addr mode.
896 if (!MatchAddress(N.getOperand(0), AM, false) &&
897 // Address could not have picked a GV address for the displacement.
898 AM.GV == NULL &&
899 // On x86-64, the resultant disp must fit in 32-bits.
900 (!is64Bit || isInt32(AM.Disp + Offset)) &&
901 // Check to see if the LHS & C is zero.
902 CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
911 // Handle "(x << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
912 // allows us to fold the shift into this addressing mode.
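// Worked example (editorial note): with C1 == 2 and C2 == 60, (x << 2) & 60
// is rewritten as (x & 15) << 2; the << 2 is then absorbed into the
// addressing mode as Scale = 4 with (x & 15) as the index register.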
913 SDValue Shift = N.getOperand(0);
914 if (Shift.getOpcode() != ISD::SHL) break;
916 // Scale must not be used already.
917 if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
919 // Not when RIP is used as the base.
920 if (AM.isRIPRel) break;
922 ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
923 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
924 if (!C1 || !C2) break;
926 // Not likely to be profitable if either the AND or SHIFT node has more
927 // than one use (unless all uses are for address computation). Besides, the
928 // isel mechanism requires their node ids to be reused.
929 if (!N.hasOneUse() || !Shift.hasOneUse())
932 // Verify that the shift amount is something we can fold.
933 unsigned ShiftCst = C1->getZExtValue();
934 if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
937 // Get the new AND mask, this folds to a constant.
938 SDValue X = Shift.getOperand(0);
939 SDValue NewANDMask = CurDAG->getNode(ISD::SRL, N.getValueType(),
940 SDValue(C2, 0), SDValue(C1, 0));
941 SDValue NewAND = CurDAG->getNode(ISD::AND, N.getValueType(), X, NewANDMask);
942 SDValue NewSHIFT = CurDAG->getNode(ISD::SHL, N.getValueType(),
943 NewAND, SDValue(C1, 0));
945 // Insert the new nodes into the topological ordering.
946 if (C1->getNodeId() > X.getNode()->getNodeId()) {
947 CurDAG->RepositionNode(X.getNode(), C1);
948 C1->setNodeId(X.getNode()->getNodeId());
950 if (NewANDMask.getNode()->getNodeId() == -1 ||
951 NewANDMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
952 CurDAG->RepositionNode(X.getNode(), NewANDMask.getNode());
953 NewANDMask.getNode()->setNodeId(X.getNode()->getNodeId());
955 if (NewAND.getNode()->getNodeId() == -1 ||
956 NewAND.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
957 CurDAG->RepositionNode(Shift.getNode(), NewAND.getNode());
958 NewAND.getNode()->setNodeId(Shift.getNode()->getNodeId());
960 if (NewSHIFT.getNode()->getNodeId() == -1 ||
961 NewSHIFT.getNode()->getNodeId() > N.getNode()->getNodeId()) {
962 CurDAG->RepositionNode(N.getNode(), NewSHIFT.getNode());
963 NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
966 CurDAG->ReplaceAllUsesWith(N, NewSHIFT);
968 AM.Scale = 1 << ShiftCst;
969 AM.IndexReg = NewAND;
974 return MatchAddressBase(N, AM, isRoot, Depth);
977 /// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
978 /// specified addressing mode without any further recursion.
979 bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM,
980 bool isRoot, unsigned Depth) {
981 // Is the base register already occupied?
982 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
983 // If so, check to see if the scale index register is set.
984 if (AM.IndexReg.getNode() == 0 && !AM.isRIPRel) {
990 // Otherwise, we cannot select it.
994 // Default, generate it as a register.
995 AM.BaseType = X86ISelAddressMode::RegBase;
1000 /// SelectAddr - returns true if it is able to pattern match an addressing mode.
1001 /// It returns, by reference, the operands which make up the maximal addressing
1002 /// mode it can match.
1003 bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
1004 SDValue &Scale, SDValue &Index,
1006 X86ISelAddressMode AM;
1007 if (MatchAddress(N, AM))
1010 MVT VT = N.getValueType();
1011 if (AM.BaseType == X86ISelAddressMode::RegBase) {
1012 if (!AM.Base.Reg.getNode())
1013 AM.Base.Reg = CurDAG->getRegister(0, VT);
1016 if (!AM.IndexReg.getNode())
1017 AM.IndexReg = CurDAG->getRegister(0, VT);
1019 getAddressOperands(AM, Base, Scale, Index, Disp);
1023 /// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
1024 /// match a load whose top elements are either undef or zeros. The load flavor
1025 /// is derived from the type of N, which is either v4f32 or v2f64.
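/// For example (editorial note), (v4f32 (scalar_to_vector (load addr))) can
/// typically be selected as a single MOVSS load from memory, which fills the
/// low element and leaves zeros in the upper elements.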
1026 bool X86DAGToDAGISel::SelectScalarSSELoad(SDValue Op, SDValue Pred,
1027 SDValue N, SDValue &Base,
1028 SDValue &Scale, SDValue &Index,
1029 SDValue &Disp, SDValue &InChain,
1030 SDValue &OutChain) {
1031 if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
1032 InChain = N.getOperand(0).getValue(1);
1033 if (ISD::isNON_EXTLoad(InChain.getNode()) &&
1034 InChain.getValue(0).hasOneUse() &&
1036 IsLegalAndProfitableToFold(N.getNode(), Pred.getNode(), Op.getNode())) {
1037 LoadSDNode *LD = cast<LoadSDNode>(InChain);
1038 if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
1040 OutChain = LD->getChain();
1045 // Also handle the case where we explicitly require zeros in the top
1046 // elements. This is a vector shuffle from the zero vector.
1047 if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
1048 // Check to see if the top elements are all zeros (or bitcast of zeros).
1049 N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
1050 N.getOperand(0).getNode()->hasOneUse() &&
1051 ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
1052 N.getOperand(0).getOperand(0).hasOneUse()) {
1053 // Okay, this is a zero extending load. Fold it.
1054 LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
1055 if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
1057 OutChain = LD->getChain();
1058 InChain = SDValue(LD, 1);
1065 /// SelectLEAAddr - It calls SelectAddr and determines if the maximal addressing
1066 /// mode it matches can be cost-effectively emitted as an LEA instruction.
1067 bool X86DAGToDAGISel::SelectLEAAddr(SDValue Op, SDValue N,
1068 SDValue &Base, SDValue &Scale,
1069 SDValue &Index, SDValue &Disp) {
1070 X86ISelAddressMode AM;
1071 if (MatchAddress(N, AM))
1074 MVT VT = N.getValueType();
1075 unsigned Complexity = 0;
1076 if (AM.BaseType == X86ISelAddressMode::RegBase)
1077 if (AM.Base.Reg.getNode())
1080 AM.Base.Reg = CurDAG->getRegister(0, VT);
1081 else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
1084 if (AM.IndexReg.getNode())
1087 AM.IndexReg = CurDAG->getRegister(0, VT);
1089 // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
1090 // a shift.
1094 // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
1095 // to a LEA. This is determined with some experimentation but is by no means
1096 // optimal (especially for code size consideration). LEA is nice because of
1097 // its three-address nature. Tweak the cost function again when we can run
1098 // convertToThreeAddress() at register allocation time.
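// For example (editorial note), "leal 4(%ebx,%ecx,2), %eax" computes
// %ebx + 2*%ecx + 4 in a single instruction without clobbering either source
// register, which is what makes LEA's three-address form attractive.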
1099 if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
1100 // For X86-64, we should always use lea to materialize RIP relative
1101 // addresses.
1102 if (Subtarget->is64Bit())
1108 if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
1111 if (Complexity > 2) {
1112 getAddressOperands(AM, Base, Scale, Index, Disp);
1118 bool X86DAGToDAGISel::TryFoldLoad(SDValue P, SDValue N,
1119 SDValue &Base, SDValue &Scale,
1120 SDValue &Index, SDValue &Disp) {
1121 if (ISD::isNON_EXTLoad(N.getNode()) &&
1123 IsLegalAndProfitableToFold(N.getNode(), P.getNode(), P.getNode()))
1124 return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp);
1128 /// getGlobalBaseReg - Return an SDNode that returns the value of
1129 /// the global base register. Output instructions required to
1130 /// initialize the global base register, if necessary.
1132 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
1133 MachineFunction *MF = CurBB->getParent();
1134 unsigned GlobalBaseReg = TM.getInstrInfo()->getGlobalBaseReg(MF);
1135 return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
1138 static SDNode *FindCallStartFromCall(SDNode *Node) {
1139 if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
1140 assert(Node->getOperand(0).getValueType() == MVT::Other &&
1141 "Node doesn't have a token chain argument!");
1142 return FindCallStartFromCall(Node->getOperand(0).getNode());
1145 /// getTruncateTo8Bit - return an SDNode that implements a subreg based
1146 /// truncate of the specified operand to i8. This can be done with tablegen,
1147 /// except that this code uses MVT::Flag in a tricky way that happens to
1148 /// improve scheduling in some cases.
1149 SDNode *X86DAGToDAGISel::getTruncateTo8Bit(SDValue N0) {
1150 assert(!Subtarget->is64Bit() &&
1151 "getTruncateTo8Bit is only needed on x86-32!");
1152 SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
1154 // Ensure that the source register has an 8-bit subreg on 32-bit targets
1155 unsigned Opc;
1156 MVT N0VT = N0.getValueType();
1157 switch (N0VT.getSimpleVT()) {
1158 default: assert(0 && "Unknown truncate!");
1159 case MVT::i16:
1160 Opc = X86::MOV16to16_;
1161 break;
1162 case MVT::i32:
1163 Opc = X86::MOV32to32_;
1164 break;
1165 }
1167 // The use of MVT::Flag here is not strictly accurate, but it helps
1168 // scheduling in some cases.
1169 N0 = SDValue(CurDAG->getTargetNode(Opc, N0VT, MVT::Flag, N0), 0);
1170 return CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
1171 MVT::i8, N0, SRIdx, N0.getValue(1));
1174 SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
1175 SDValue Chain = Node->getOperand(0);
1176 SDValue In1 = Node->getOperand(1);
1177 SDValue In2L = Node->getOperand(2);
1178 SDValue In2H = Node->getOperand(3);
1179 SDValue Tmp0, Tmp1, Tmp2, Tmp3;
1180 if (!SelectAddr(In1, In1, Tmp0, Tmp1, Tmp2, Tmp3))
1182 SDValue LSI = Node->getOperand(4); // MemOperand
1183 const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, In2L, In2H, LSI, Chain };
1184 return CurDAG->getTargetNode(Opc, MVT::i32, MVT::i32, MVT::Other, Ops, 8);
1187 SDNode *X86DAGToDAGISel::Select(SDValue N) {
1188 SDNode *Node = N.getNode();
1189 MVT NVT = Node->getValueType(0);
1191 unsigned Opcode = Node->getOpcode();
1194 DOUT << std::string(Indent, ' ') << "Selecting: ";
1195 DEBUG(Node->dump(CurDAG));
1200 if (Node->isMachineOpcode()) {
1202 DOUT << std::string(Indent-2, ' ') << "== ";
1203 DEBUG(Node->dump(CurDAG));
1207 return NULL; // Already selected.
1212 case X86ISD::GlobalBaseReg:
1213 return getGlobalBaseReg();
1215 case X86ISD::ATOMOR64_DAG:
1216 return SelectAtomic64(Node, X86::ATOMOR6432);
1217 case X86ISD::ATOMXOR64_DAG:
1218 return SelectAtomic64(Node, X86::ATOMXOR6432);
1219 case X86ISD::ATOMADD64_DAG:
1220 return SelectAtomic64(Node, X86::ATOMADD6432);
1221 case X86ISD::ATOMSUB64_DAG:
1222 return SelectAtomic64(Node, X86::ATOMSUB6432);
1223 case X86ISD::ATOMNAND64_DAG:
1224 return SelectAtomic64(Node, X86::ATOMNAND6432);
1225 case X86ISD::ATOMAND64_DAG:
1226 return SelectAtomic64(Node, X86::ATOMAND6432);
1227 case X86ISD::ATOMSWAP64_DAG:
1228 return SelectAtomic64(Node, X86::ATOMSWAP6432);
1230 case ISD::SMUL_LOHI:
1231 case ISD::UMUL_LOHI: {
1232 SDValue N0 = Node->getOperand(0);
1233 SDValue N1 = Node->getOperand(1);
1235 bool isSigned = Opcode == ISD::SMUL_LOHI;
1236 if (!isSigned)
1237 switch (NVT.getSimpleVT()) {
1238 default: assert(0 && "Unsupported VT!");
1239 case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
1240 case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
1241 case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
1242 case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
1243 }
1244 else
1245 switch (NVT.getSimpleVT()) {
1246 default: assert(0 && "Unsupported VT!");
1247 case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
1248 case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
1249 case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
1250 case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
1253 unsigned LoReg, HiReg;
1254 switch (NVT.getSimpleVT()) {
1255 default: assert(0 && "Unsupported VT!");
1256 case MVT::i8: LoReg = X86::AL; HiReg = X86::AH; break;
1257 case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; break;
1258 case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
1259 case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
1262 SDValue Tmp0, Tmp1, Tmp2, Tmp3;
1263 bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
1264 // multiplication is commutative
1265 if (!foldedLoad) {
1266 foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3);
1267 if (foldedLoad)
1268 std::swap(N0, N1);
1269 }
1271 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), LoReg,
1272 N0, SDValue()).getValue(1);
1275 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
1277 CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
1278 InFlag = SDValue(CNode, 1);
1279 // Update the chain.
1280 ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
1283 SDValue(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
1286 // Copy the low half of the result, if it is needed.
1287 if (!N.getValue(0).use_empty()) {
1288 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1289 LoReg, NVT, InFlag);
1290 InFlag = Result.getValue(2);
1291 ReplaceUses(N.getValue(0), Result);
1293 DOUT << std::string(Indent-2, ' ') << "=> ";
1294 DEBUG(Result.getNode()->dump(CurDAG));
1298 // Copy the high half of the result, if it is needed.
1299 if (!N.getValue(1).use_empty()) {
1301 if (HiReg == X86::AH && Subtarget->is64Bit()) {
1302 // Prevent use of AH in a REX instruction by referencing AX instead.
1303 // Shift it down 8 bits.
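// (Editorial note: the high-byte registers AH/BH/CH/DH cannot be encoded in
// any instruction that also carries a REX prefix, hence the copy through AX.)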
1304 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1305 X86::AX, MVT::i16, InFlag);
1306 InFlag = Result.getValue(2);
1307 Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
1308 CurDAG->getTargetConstant(8, MVT::i8)), 0);
1309 // Then truncate it down to i8.
1310 SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
1311 Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
1312 MVT::i8, Result, SRIdx), 0);
1314 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1315 HiReg, NVT, InFlag);
1316 InFlag = Result.getValue(2);
1318 ReplaceUses(N.getValue(1), Result);
1320 DOUT << std::string(Indent-2, ' ') << "=> ";
1321 DEBUG(Result.getNode()->dump(CurDAG));
1333 case ISD::SDIVREM:
1334 case ISD::UDIVREM: {
1335 SDValue N0 = Node->getOperand(0);
1336 SDValue N1 = Node->getOperand(1);
1338 bool isSigned = Opcode == ISD::SDIVREM;
1339 if (!isSigned)
1340 switch (NVT.getSimpleVT()) {
1341 default: assert(0 && "Unsupported VT!");
1342 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
1343 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
1344 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
1345 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
1346 }
1347 else
1348 switch (NVT.getSimpleVT()) {
1349 default: assert(0 && "Unsupported VT!");
1350 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
1351 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
1352 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
1353 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
1356 unsigned LoReg, HiReg;
1357 unsigned ClrOpcode, SExtOpcode;
1358 switch (NVT.getSimpleVT()) {
1359 default: assert(0 && "Unsupported VT!");
1361 LoReg = X86::AL; HiReg = X86::AH;
1363 SExtOpcode = X86::CBW;
1366 LoReg = X86::AX; HiReg = X86::DX;
1367 ClrOpcode = X86::MOV16r0;
1368 SExtOpcode = X86::CWD;
1371 LoReg = X86::EAX; HiReg = X86::EDX;
1372 ClrOpcode = X86::MOV32r0;
1373 SExtOpcode = X86::CDQ;
1376 LoReg = X86::RAX; HiReg = X86::RDX;
1377 ClrOpcode = X86::MOV64r0;
1378 SExtOpcode = X86::CQO;
1382 SDValue Tmp0, Tmp1, Tmp2, Tmp3;
1383 bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
1384 bool signBitIsZero = CurDAG->SignBitIsZero(N0);
1387 if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
1388 // Special case for div8, just use a move with zero extension to AX to
1389 // clear the upper 8 bits (AH).
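// (Editorial note: 8-bit DIV/IDIV divides AX by the operand, leaving the
// quotient in AL and the remainder in AH, so zero-extending the dividend
// into AX is the only setup needed when the division is unsigned or the
// sign bit is known to be zero.)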
1390 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Move, Chain;
1391 if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3)) {
1392 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N0.getOperand(0) };
1394 SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, MVT::i16, MVT::Other,
1396 Chain = Move.getValue(1);
1397 ReplaceUses(N0.getValue(1), Chain);
1400 SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, MVT::i16, N0), 0);
1401 Chain = CurDAG->getEntryNode();
1403 Chain = CurDAG->getCopyToReg(Chain, X86::AX, Move, SDValue());
1404 InFlag = Chain.getValue(1);
1407 CurDAG->getCopyToReg(CurDAG->getEntryNode(),
1408 LoReg, N0, SDValue()).getValue(1);
1409 if (isSigned && !signBitIsZero) {
1410 // Sign extend the low part into the high part.
1412 SDValue(CurDAG->getTargetNode(SExtOpcode, MVT::Flag, InFlag), 0);
1414 // Zero out the high part, effectively zero extending the input.
1415 SDValue ClrNode = SDValue(CurDAG->getTargetNode(ClrOpcode, NVT), 0);
1416 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), HiReg,
1417 ClrNode, InFlag).getValue(1);
1422 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
1424 CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
1425 InFlag = SDValue(CNode, 1);
1426 // Update the chain.
1427 ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
1430 SDValue(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
1433 // Copy the division (low) result, if it is needed.
1434 if (!N.getValue(0).use_empty()) {
1435 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1436 LoReg, NVT, InFlag);
1437 InFlag = Result.getValue(2);
1438 ReplaceUses(N.getValue(0), Result);
1440 DOUT << std::string(Indent-2, ' ') << "=> ";
1441 DEBUG(Result.getNode()->dump(CurDAG));
1445 // Copy the remainder (high) result, if it is needed.
1446 if (!N.getValue(1).use_empty()) {
1448 if (HiReg == X86::AH && Subtarget->is64Bit()) {
1449 // Prevent use of AH in a REX instruction by referencing AX instead.
1450 // Shift it down 8 bits.
1451 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1452 X86::AX, MVT::i16, InFlag);
1453 InFlag = Result.getValue(2);
1454 Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
1455 CurDAG->getTargetConstant(8, MVT::i8)), 0);
1456 // Then truncate it down to i8.
1457 SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
1458 Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
1459 MVT::i8, Result, SRIdx), 0);
1461 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1462 HiReg, NVT, InFlag);
1463 InFlag = Result.getValue(2);
1465 ReplaceUses(N.getValue(1), Result);
1467 DOUT << std::string(Indent-2, ' ') << "=> ";
1468 DEBUG(Result.getNode()->dump(CurDAG));
1480 case ISD::SIGN_EXTEND_INREG: {
1481 MVT SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
1482 if (SVT == MVT::i8 && !Subtarget->is64Bit()) {
1483 SDValue N0 = Node->getOperand(0);
1485 SDValue TruncOp = SDValue(getTruncateTo8Bit(N0), 0);
1486 unsigned Opc;
1487 switch (NVT.getSimpleVT()) {
1488 default: assert(0 && "Unknown sign_extend_inreg!");
1489 case MVT::i16:
1490 Opc = X86::MOVSX16rr8;
1491 break;
1492 case MVT::i32:
1493 Opc = X86::MOVSX32rr8;
1494 break;
1495 }
1497 SDNode *ResNode = CurDAG->getTargetNode(Opc, NVT, TruncOp);
1500 DOUT << std::string(Indent-2, ' ') << "=> ";
1501 DEBUG(TruncOp.getNode()->dump(CurDAG));
1503 DOUT << std::string(Indent-2, ' ') << "=> ";
1504 DEBUG(ResNode->dump(CurDAG));
1513 case ISD::TRUNCATE: {
1514 if (NVT == MVT::i8 && !Subtarget->is64Bit()) {
1515 SDValue Input = Node->getOperand(0);
1516 SDNode *ResNode = getTruncateTo8Bit(Input);
1519 DOUT << std::string(Indent-2, ' ') << "=> ";
1520 DEBUG(ResNode->dump(CurDAG));
1529 case ISD::DECLARE: {
1530 // Handle DECLARE nodes here because the second operand may have been
1531 // wrapped in X86ISD::Wrapper.
1532 SDValue Chain = Node->getOperand(0);
1533 SDValue N1 = Node->getOperand(1);
1534 SDValue N2 = Node->getOperand(2);
1535 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N1);
1538 if (N2.getOpcode() == ISD::ADD &&
1539 N2.getOperand(0).getOpcode() == X86ISD::GlobalBaseReg)
1540 N2 = N2.getOperand(1);
1541 if (N2.getOpcode() != X86ISD::Wrapper)
1543 GlobalAddressSDNode *GVNode =
1544 dyn_cast<GlobalAddressSDNode>(N2.getOperand(0));
1547 SDValue Tmp1 = CurDAG->getTargetFrameIndex(FINode->getIndex(),
1548 TLI.getPointerTy());
1549 SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GVNode->getGlobal(),
1550 TLI.getPointerTy());
1551 SDValue Ops[] = { Tmp1, Tmp2, Chain };
1552 return CurDAG->getTargetNode(TargetInstrInfo::DECLARE,
1553 MVT::Other, Ops, 3);
1558 SDNode *ResNode = SelectCode(N);
1561 DOUT << std::string(Indent-2, ' ') << "=> ";
1562 if (ResNode == NULL || ResNode == N.getNode())
1563 DEBUG(N.getNode()->dump(CurDAG));
1565 DEBUG(ResNode->dump(CurDAG));
1573 bool X86DAGToDAGISel::
1574 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
1575 std::vector<SDValue> &OutOps) {
1576 SDValue Op0, Op1, Op2, Op3;
1577 switch (ConstraintCode) {
1578 case 'o': // offsetable ??
1579 case 'v': // not offsetable ??
1580 default: return true;
1581 case 'm': // memory
1582 if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3))
1583 return true;
1584 break;
1585 }
1587 OutOps.push_back(Op0);
1588 OutOps.push_back(Op1);
1589 OutOps.push_back(Op2);
1590 OutOps.push_back(Op3);
1594 /// createX86ISelDag - This pass converts a legalized DAG into an
1595 /// X86-specific DAG, ready for instruction scheduling.
1597 FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, bool Fast) {
1598 return new X86DAGToDAGISel(TM, Fast);
1599 }