//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized DAG to an X86 DAG.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "x86-isel"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Streams.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

#include "llvm/Support/CommandLine.h"
static cl::opt<bool> AvoidDupAddrCompute("x86-avoid-dup-address", cl::Hidden);

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//
/// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
/// SDValues instead of register numbers for the leaves of the matched
/// tree.
struct X86ISelAddressMode {
  enum {
    RegBase,
    FrameIndexBase
  } BaseType;

  struct {            // This is really a union, discriminated by BaseType!
    SDValue Reg;
    int FrameIndex;
  } Base;

  bool isRIPRel;      // RIP as base?
  unsigned Scale;
  SDValue IndexReg;
  int32_t Disp;
  SDValue Segment;
  GlobalValue *GV;
  Constant *CP;
  const char *ES;
  int JT;
  unsigned Align;     // CP alignment.

  X86ISelAddressMode()
    : BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
      Segment(), GV(0), CP(0), ES(0), JT(-1), Align(0) {
  }
  bool hasSymbolicDisplacement() const {
    return GV != 0 || CP != 0 || ES != 0 || JT != -1;
  }

  void dump() {
    cerr << "X86ISelAddressMode " << this << "\n";
    cerr << "Base.Reg ";
    if (Base.Reg.getNode() != 0) Base.Reg.getNode()->dump();
    else cerr << "nul";
    cerr << " Base.FrameIndex " << Base.FrameIndex << "\n";
    cerr << "isRIPRel " << isRIPRel << " Scale " << Scale << "\n";
    cerr << "IndexReg ";
    if (IndexReg.getNode() != 0) IndexReg.getNode()->dump();
    else cerr << "nul";
    cerr << " Disp " << Disp << "\n";
    cerr << "GV "; if (GV) GV->dump();
    else cerr << "nul";
    cerr << " CP "; if (CP) CP->dump();
    else cerr << "nul";
    cerr << "\n";
    cerr << "ES "; if (ES) cerr << ES; else cerr << "nul";
    cerr << " JT " << JT << " Align " << Align << "\n";
  }
};
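
// For example, the address "%gs:20(%ebx,%esi,4)" (i.e. gs:[ebx + esi*4 + 20])
// would be matched with Base.Reg = EBX, IndexReg = ESI, Scale = 4, Disp = 20,
// and Segment holding the GS segment (register names here are illustrative).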
//===--------------------------------------------------------------------===//
/// ISel - X86 specific code to select X86 machine instructions for
/// SelectionDAG operations.
///
class VISIBILITY_HIDDEN X86DAGToDAGISel : public SelectionDAGISel {
  /// TM - Keep a reference to X86TargetMachine.
  ///
  X86TargetMachine &TM;

  /// X86Lowering - This object fully describes how to lower LLVM code to an
  /// X86-specific SelectionDAG.
  X86TargetLowering &X86Lowering;

  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// CurBB - Current BB being isel'd.
  ///
  MachineBasicBlock *CurBB;

  /// OptForSize - If true, selector should try to optimize for code size
  /// instead of performance.
  bool OptForSize;

#ifndef NDEBUG
  unsigned Indent;
#endif

public:
  X86DAGToDAGISel(X86TargetMachine &tm, bool fast)
    : SelectionDAGISel(tm, fast),
      TM(tm), X86Lowering(*TM.getTargetLowering()),
      Subtarget(&TM.getSubtarget<X86Subtarget>()),
      OptForSize(false) {}
  virtual const char *getPassName() const {
    return "X86 DAG->DAG Instruction Selection";
  }

  /// InstructionSelect - This callback is invoked by
  /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
  virtual void InstructionSelect();

  virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);

  bool IsLegalAndProfitableToFold(SDNode *N, SDNode *U, SDNode *Root) const;

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

private:
  SDNode *Select(SDValue N);
  SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);

  bool MatchSegmentBaseAddress(SDValue N, X86ISelAddressMode &AM);
  bool MatchLoad(SDValue N, X86ISelAddressMode &AM);
  bool MatchAddress(SDValue N, X86ISelAddressMode &AM,
                    unsigned Depth = 0);
  bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
  bool SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                  SDValue &Scale, SDValue &Index, SDValue &Disp,
                  SDValue &Segment);
  bool SelectLEAAddr(SDValue Op, SDValue N, SDValue &Base,
                     SDValue &Scale, SDValue &Index, SDValue &Disp);
  bool SelectScalarSSELoad(SDValue Op, SDValue Pred,
                           SDValue N, SDValue &Base, SDValue &Scale,
                           SDValue &Index, SDValue &Disp,
                           SDValue &Segment,
                           SDValue &InChain, SDValue &OutChain);
  bool TryFoldLoad(SDValue P, SDValue N,
                   SDValue &Base, SDValue &Scale,
                   SDValue &Index, SDValue &Disp,
                   SDValue &Segment);
  void PreprocessForRMW();
  void PreprocessForFPConvert();

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps);

  void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
  inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
    Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
      CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
      AM.Base.Reg;
    Scale = getI8Imm(AM.Scale);
    Index = AM.IndexReg;
    // These are 32-bit even in 64-bit mode since RIP relative offset
    // is 32-bit.
    if (AM.GV)
      Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp);
    else if (AM.CP)
      Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                           AM.Align, AM.Disp);
    else if (AM.ES)
      Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32);
    else if (AM.JT != -1)
      Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32);
    else
      Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

    if (AM.Segment.getNode())
      Segment = AM.Segment;
    else
      Segment = CurDAG->getRegister(0, MVT::i32);
  }
  /// getI8Imm - Return a target constant with the specified value, of type
  /// i8.
  inline SDValue getI8Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i8);
  }

  /// getI16Imm - Return a target constant with the specified value, of type
  /// i16.
  inline SDValue getI16Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i16);
  }

  /// getI32Imm - Return a target constant with the specified value, of type
  /// i32.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }

  /// getGlobalBaseReg - Return an SDNode that returns the value of
  /// the global base register. Output instructions required to
  /// initialize the global base register, if necessary.
  ///
  SDNode *getGlobalBaseReg();
  /// getTruncateTo8Bit - return an SDNode that implements a subreg based
  /// truncate of the specified operand to i8. This can be done with tablegen,
  /// except that this code uses MVT::Flag in a tricky way that happens to
  /// improve scheduling in some cases.
  SDNode *getTruncateTo8Bit(SDValue N0);
};
/// findFlagUse - Return use of MVT::Flag value produced by the specified
/// SDNode.
///
static SDNode *findFlagUse(SDNode *N) {
  unsigned FlagResNo = N->getNumValues()-1;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDUse &Use = I.getUse();
    if (Use.getResNo() == FlagResNo)
      return Use.getUser();
  }
  return NULL;
}
/// findNonImmUse - Return true if "Use" is a non-immediate use of "Def".
/// This function recursively traverses up the operand chain, ignoring
/// certain nodes.
static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
                          SDNode *Root,
                          SmallPtrSet<SDNode*, 16> &Visited) {
  if (Use->getNodeId() < Def->getNodeId() ||
      !Visited.insert(Use))
    return false;

  for (unsigned i = 0, e = Use->getNumOperands(); i != e; ++i) {
    SDNode *N = Use->getOperand(i).getNode();
    if (N == Def) {
      if (Use == ImmedUse || Use == Root)
        continue;  // We are not looking for immediate use.
      return true;
    }

    // Traverse up the operand chain.
    if (findNonImmUse(N, Def, ImmedUse, Root, Visited))
      return true;
  }
  return false;
}
/// isNonImmUse - Start searching from Root up the DAG to check if Def can
/// be reached. Return true if that's the case. However, ignore direct uses
/// by ImmedUse (which would be U in the example illustrated in
/// IsLegalAndProfitableToFold) and by Root (which can happen in the store
/// case).
/// FIXME: to be really generic, we should allow direct use by any node
/// that is being folded. But realistically since we only fold loads which
/// have one non-chain use, we only need to watch out for load/op/store
/// and load/op/cmp case where the root (store / cmp) may reach the load via
/// its chain operand.
static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse) {
  SmallPtrSet<SDNode*, 16> Visited;
  return findNonImmUse(Root, Def, ImmedUse, Root, Visited);
}
bool X86DAGToDAGISel::IsLegalAndProfitableToFold(SDNode *N, SDNode *U,
                                                 SDNode *Root) const {
  if (Fast) return false;

  if (U == Root)
    switch (U->getOpcode()) {
    default: break;
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);
      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In case where the increment is 1, then
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;
      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl    %gs:0, %eax
      // leal    i@NTPOFF(%eax), %eax
      // instead of
      // movl    $i@NTPOFF, %eax
      // addl    %gs:0, %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
      break;
    }
    }
  // If Root use can somehow reach N through a path that doesn't contain
  // U, then folding N would create a cycle. e.g. In the following
  // diagram, Root can reach N through X. If N is folded into Root, then
  // X is both a predecessor and a successor of U.
  //
  //          [N*]           //
  //         ^   ^           //
  //        /     \          //
  //      [U*]    [X]?       //
  //        ^     ^          //
  //         \   /           //
  //          \ /            //
  //         [Root*]         //
  //
  // * indicates nodes to be folded together.
  //
  // If Root produces a flag, then it gets (even more) interesting. Since it
  // will be "glued" together with its flag use in the scheduler, we need to
  // check if it might reach N.
  //
  //          [N*]           //
  //         ^   ^           //
  //        /     \          //
  //      [U*]    [X]?       //
  //        ^       ^        //
  //         \       \       //
  //          \      |       //
  //         [Root*] |       //
  //          ^      |       //
  //          f      |       //
  //          |      |       //
  //         [Y]     |       //
  //           ^     |       //
  //           f     |       //
  //           |     /       //
  //          [FU]--/
  //
  // If FU (flag use) indirectly reaches N (the load), and Root folds N
  // (call it Fold), then X is a predecessor of FU and a successor of
  // Fold. But since Fold and FU are flagged together, this will create
  // a cycle in the scheduling graph.
  MVT VT = Root->getValueType(Root->getNumValues()-1);
  while (VT == MVT::Flag) {
    SDNode *FU = findFlagUse(Root);
    if (FU == NULL)
      break;
    Root = FU;
    VT = Root->getValueType(Root->getNumValues()-1);
  }

  return !isNonImmUse(Root, N, U);
}
/// MoveBelowTokenFactor - Replace the TokenFactor operand with the load's
/// chain operand and move the load below the TokenFactor. Replace the store's
/// chain operand with the load's chain result.
static void MoveBelowTokenFactor(SelectionDAG *CurDAG, SDValue Load,
                                 SDValue Store, SDValue TF) {
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = TF.getNode()->getNumOperands(); i != e; ++i)
    if (Load.getNode() == TF.getOperand(i).getNode())
      Ops.push_back(Load.getOperand(0));
    else
      Ops.push_back(TF.getOperand(i));
  CurDAG->UpdateNodeOperands(TF, &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
  CurDAG->UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
                             Store.getOperand(2), Store.getOperand(3));
}
/// isRMWLoad - Return true if N is a load that's part of an RMW sub-DAG.
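/// For example, in (store (add (load [p]), x), [p]) the load is an RMW load:
/// the whole sub-DAG can be selected as a single memory-destination add such
/// as "addl %reg, (mem)".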
static bool isRMWLoad(SDValue N, SDValue Chain, SDValue Address,
                      SDValue &Load) {
  if (N.getOpcode() == ISD::BIT_CONVERT)
    N = N.getOperand(0);

  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD || LD->isVolatile())
    return false;
  if (LD->getAddressingMode() != ISD::UNINDEXED)
    return false;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  if (ExtType != ISD::NON_EXTLOAD && ExtType != ISD::EXTLOAD)
    return false;

  if (N.hasOneUse() &&
      N.getOperand(1) == Address &&
      N.getNode()->isOperandOf(Chain.getNode())) {
    Load = N;
    return true;
  }
  return false;
}
/// MoveBelowCallSeqStart - Replace CALLSEQ_START operand with load's chain
/// operand and move load below the call's chain operand.
static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
                                  SDValue Call, SDValue CallSeqStart) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = CallSeqStart.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected CallSeqStart chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = CallSeqStart.getNumOperands(); i != e; ++i)
    Ops.push_back(CallSeqStart.getOperand(i));
  CurDAG->UpdateNodeOperands(CallSeqStart, &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size());
}
/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain) {
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()))
    return true;
  return false;
}
/// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
/// This is only run if not in -fast mode (aka -O0).
/// This allows the instruction selector to pick more read-modify-write
/// instructions. This is a common case:
///
///     [Load chain]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///      /      \-
///     /          |
/// [TokenFactor] [Op]
///     ^          ^
///     |          |
///      \        /
///       \      /
///       [Store]
///
/// The fact the store's chain operand != load's chain will prevent the
/// (store (op (load))) instruction from being selected. We can transform it to:
///
///     [Load chain]
///         ^
///         |
///    [TokenFactor]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///      /      \-
///     /          |
///    |          [Op]
///     \          ^
///      \         |
///       \       /
///        \     /
///        [Store]
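///
/// For example, this is what lets (store (add (load [p]), 1), [p]) be
/// selected as one read-modify-write instruction ("addl $1, (mem)") instead
/// of a separate load, add, and store.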
void X86DAGToDAGISel::PreprocessForRMW() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {
    if (I->getOpcode() == X86ISD::CALL) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      SDValue Chain = I->getOperand(0);
      SDValue Load  = I->getOperand(1);
      if (!isCalleeLoad(Load, Chain))
        continue;
      MoveBelowCallSeqStart(CurDAG, Load, SDValue(I, 0), Chain);
      ++NumLoadMoved;
      continue;
    }
    if (!ISD::isNON_TRUNCStore(I))
      continue;
    SDValue Chain = I->getOperand(0);

    if (Chain.getNode()->getOpcode() != ISD::TokenFactor)
      continue;

    SDValue N1 = I->getOperand(1);
    SDValue N2 = I->getOperand(2);
    if ((N1.getValueType().isFloatingPoint() &&
         !N1.getValueType().isVector()) ||
        !N1.hasOneUse())
      continue;

    bool RModW = false;
    SDValue Load;
    unsigned Opcode = N1.getNode()->getOpcode();
    switch (Opcode) {
    case ISD::ADD:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::VECTOR_SHUFFLE: {
      SDValue N10 = N1.getOperand(0);
      SDValue N11 = N1.getOperand(1);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      if (!RModW)
        RModW = isRMWLoad(N11, Chain, N2, Load);
      break;
    }
    case ISD::SUB:
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
    case ISD::SUBC:
    case ISD::SUBE:
    case X86ISD::SHLD:
    case X86ISD::SHRD: {
      SDValue N10 = N1.getOperand(0);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      break;
    }
    }

    if (RModW) {
      MoveBelowTokenFactor(CurDAG, Load, SDValue(I, 0), Chain);
      ++NumLoadMoved;
    }
  }
}
/// PreprocessForFPConvert - Walk over the DAG, lowering fpround and fpextend
/// nodes that target the FP stack into a store and a load through the stack.
/// This is a gross hack. We would like to simply mark these as being illegal,
/// but when we do that, legalize produces these when it expands calls, then
/// expands these in the same legalize pass. We would like dag combine to be
/// able to hack on these between the call expansion and the node
/// legalization. As such this pass basically does "really late" legalization
/// of these inline with the X86 isel pass.
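/// For example, an (f32 fp_round (f64 x)) where x lives on the FP stack is
/// rewritten into a 4-byte truncating store to a stack temporary followed by
/// an f32 load of the same slot.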
void X86DAGToDAGISel::PreprocessForFPConvert() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    MVT SrcVT = N->getOperand(0).getValueType();
    MVT DstVT = N->getValueType(0);
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;
    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    MVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;
    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, NULL, 0, MemVT);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        NULL, 0, MemVT);
    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
/// InstructionSelect - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void X86DAGToDAGISel::InstructionSelect() {
  CurBB = BB;  // BB can change as result of isel.
  const Function *F = CurDAG->getMachineFunction().getFunction();
  OptForSize = F->hasFnAttr(Attribute::OptimizeForSize);

  if (!Fast)
    PreprocessForRMW();

  // FIXME: This should only happen when not -fast.
  PreprocessForFPConvert();
  // Codegen the basic block.
#ifndef NDEBUG
  DOUT << "===== Instruction selection begins:\n";
  Indent = 0;
#endif
  SelectRoot(*CurDAG);
#ifndef NDEBUG
  DOUT << "===== Instruction selection ends:\n";
#endif

  CurDAG->RemoveDeadNodes();
}
/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing())
    BuildMI(BB, DebugLoc::getUnknownLoc(),
            TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
}
void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
  // If this is main, emit special code for main.
  MachineBasicBlock *BB = MF.begin();
  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}
bool X86DAGToDAGISel::MatchSegmentBaseAddress(SDValue N,
                                              X86ISelAddressMode &AM) {
  assert(N.getOpcode() == X86ISD::SegmentBaseAddress);
  SDValue Segment = N.getOperand(0);

  if (AM.Segment.getNode() == 0) {
    AM.Segment = Segment;
    return false;
  }

  return true;
}
bool X86DAGToDAGISel::MatchLoad(SDValue N, X86ISelAddressMode &AM) {
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
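  // For example, a load of the thread pointer (the value stored at %gs:0)
  // can then be selected as "movl %gs:0, %eax": the segment becomes the
  // matched Segment operand and the address is just displacement 0.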
  SDValue Address = N.getOperand(1);
  if (Address.getOpcode() == X86ISD::SegmentBaseAddress &&
      !MatchSegmentBaseAddress(Address, AM))
    return false;

  return true;
}
/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
                                   unsigned Depth) {
  bool is64Bit = Subtarget->is64Bit();
  DebugLoc dl = N.getDebugLoc();
  DOUT << "MatchAddress: "; DEBUG(AM.dump());
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRel) {
    if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
      uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
      if (!is64Bit || isInt32(AM.Disp + Val)) {
        AM.Disp += Val;
        return false;
      }
    }
    return true;
  }
  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!is64Bit || isInt32(AM.Disp + Val)) {
      AM.Disp += Val;
      return false;
    }
    break;
  }

  case X86ISD::SegmentBaseAddress:
    if (!MatchSegmentBaseAddress(N, AM))
      return false;
    break;
  case X86ISD::Wrapper: {
    DOUT << "Wrapper: 64bit " << is64Bit;
    DOUT << " AM "; DEBUG(AM.dump()); DOUT << "\n";
    // Under X86-64 non-small code model, GV (and friends) are 64-bits.
    // Also, base and index reg must be 0 in order to use rip as base.
    if (is64Bit && (TM.getCodeModel() != CodeModel::Small ||
                    AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
      break;
    if (AM.hasSymbolicDisplacement())
      break;
    // If the value is available in a register and both the base and index
    // components have already been picked, we can't fit it in the addressing
    // mode. Duplicate the GlobalAddress or ConstantPool as a displacement
    // instead.
    {
      SDValue N0 = N.getOperand(0);
      if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
        uint64_t Offset = G->getOffset();
        if (!is64Bit || isInt32(AM.Disp + Offset)) {
          GlobalValue *GV = G->getGlobal();
          AM.GV = GV;
          AM.Disp += Offset;
          AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
          return false;
        }
      } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
        uint64_t Offset = CP->getOffset();
        if (!is64Bit || isInt32(AM.Disp + Offset)) {
          AM.CP = CP->getConstVal();
          AM.Align = CP->getAlignment();
          AM.Disp += Offset;
          AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
          return false;
        }
      } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
        AM.ES = S->getSymbol();
        AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
        return false;
      } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
        AM.JT = J->getIndex();
        AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
        return false;
      }
    }
    break;
  }

  case ISD::LOAD:
    if (!MatchLoad(N, AM))
      return false;
    break;
  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase
        && AM.Base.Reg.getNode() == 0) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1 || AM.isRIPRel)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (ShVal.getNode()->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
            isa<ConstantSDNode>(ShVal.getNode()->getOperand(1))) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = AM.Disp + (AddVal->getSExtValue() << Val);
          if (!is64Bit || isInt32(Disp))
            AM.Disp = Disp;
          else
            AM.IndexReg = ShVal;
        } else {
          AM.IndexReg = ShVal;
        }
        return false;
      }
    }
    break;

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
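    // For example, x*9 becomes x + x*8, which fits the base + index*scale
    // form and can be emitted as a single LEA, e.g.
    // "leal (%eax,%eax,8), %ecx" (register names are illustrative).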
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base.Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0 &&
        !AM.isRIPRel) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AM.Disp + AddVal->getSExtValue() *
                                      CN->getZExtValue();
            if (!is64Bit || isInt32(Disp))
              AM.Disp = Disp;
            else
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base.Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::ADD: {
    X86ISelAddressMode Backup = AM;
    if (!MatchAddress(N.getNode()->getOperand(0), AM, Depth+1) &&
        !MatchAddress(N.getNode()->getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;
    if (!MatchAddress(N.getNode()->getOperand(1), AM, Depth+1) &&
        !MatchAddress(N.getNode()->getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base.Reg.getNode() &&
        !AM.IndexReg.getNode() &&
        !AM.isRIPRel) {
      AM.Base.Reg = N.getNode()->getOperand(0);
      AM.IndexReg = N.getNode()->getOperand(1);
      AM.Scale = 1;
      return false;
    }
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
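    // For example, if X is known to be 8-byte aligned, "X | 7" computes the
    // same value as "X + 7", so the 7 can go in the displacement field.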
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      X86ISelAddressMode Backup = AM;
      uint64_t Offset = CN->getSExtValue();
      // Start with the LHS as an addr mode.
      if (!MatchAddress(N.getOperand(0), AM, Depth+1) &&
          // Address could not have picked a GV address for the displacement.
          AM.GV == NULL &&
          // On x86-64, the resultant disp must fit in 32-bits.
          (!is64Bit || isInt32(AM.Disp + Offset)) &&
          // Check to see if the LHS & C is zero.
          CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
        AM.Disp += Offset;
        return false;
      }
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Handle "(X << C1) & C2" as "(X & (C2 >> C1)) << C1" if safe and if this
    // allows us to fold the shift into this addressing mode.
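    // For example, "(X << 2) & 60" becomes "(X & 15) << 2": the masked value
    // X & 15 can then sit in the index register with scale 4.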
    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SHL) break;

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    // Not when RIP is used as the base.
    if (AM.isRIPRel) break;

    ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
    if (!C1 || !C2) break;

    // Not likely to be profitable if either the AND or SHIFT node has more
    // than one use (unless all uses are for address computation). Besides,
    // isel mechanism requires their node ids to be reused.
    if (!N.hasOneUse() || !Shift.hasOneUse())
      break;

    // Verify that the shift amount is something we can fold.
    unsigned ShiftCst = C1->getZExtValue();
    if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
      break;

    // Get the new AND mask, this folds to a constant.
    SDValue X = Shift.getOperand(0);
    SDValue NewANDMask = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                         SDValue(C2, 0), SDValue(C1, 0));
    SDValue NewAND = CurDAG->getNode(ISD::AND, dl, N.getValueType(), X,
                                     NewANDMask);
    SDValue NewSHIFT = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
                                       NewAND, SDValue(C1, 0));
    // Insert the new nodes into the topological ordering.
    if (C1->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), C1);
      C1->setNodeId(X.getNode()->getNodeId());
    }
    if (NewANDMask.getNode()->getNodeId() == -1 ||
        NewANDMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), NewANDMask.getNode());
      NewANDMask.getNode()->setNodeId(X.getNode()->getNodeId());
    }
    if (NewAND.getNode()->getNodeId() == -1 ||
        NewAND.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
      CurDAG->RepositionNode(Shift.getNode(), NewAND.getNode());
      NewAND.getNode()->setNodeId(Shift.getNode()->getNodeId());
    }
    if (NewSHIFT.getNode()->getNodeId() == -1 ||
        NewSHIFT.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), NewSHIFT.getNode());
      NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
    }

    CurDAG->ReplaceAllUsesWith(N, NewSHIFT);

    AM.Scale = 1 << ShiftCst;
    AM.IndexReg = NewAND;
    return false;
  }
  }

  return MatchAddressBase(N, AM);
}
/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0 && !AM.isRIPRel) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base.Reg = N;
  return false;
}
/// SelectAddr - Returns true if it is able to pattern-match an addressing
/// mode. It returns the operands which make up the maximal addressing mode
/// it can match by reference.
bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;
  bool Done = false;
  if (AvoidDupAddrCompute && !N.hasOneUse()) {
    unsigned Opcode = N.getOpcode();
    if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex &&
        Opcode != X86ISD::Wrapper) {
      // If we are able to fold N into addressing mode, then we'll allow it even
      // if N has multiple uses. In general, addressing computation is used as
      // addresses by all of its uses. But watch out for CopyToReg uses, that
      // means the address computation is liveout. It will be computed by a LEA
      // so we want to avoid computing the address twice.
      for (SDNode::use_iterator UI = N.getNode()->use_begin(),
             UE = N.getNode()->use_end(); UI != UE; ++UI) {
        if (UI->getOpcode() == ISD::CopyToReg) {
          MatchAddressBase(N, AM);
          Done = true;
          break;
        }
      }
    }
  }

  if (!Done && MatchAddress(N, AM))
    return false;

  MVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base.Reg.getNode())
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
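/// For example, (v4f32 (scalar_to_vector (f32 (load addr)))) can be selected
/// as a single MOVSS from memory, since MOVSS zeros the upper elements.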
bool X86DAGToDAGISel::SelectScalarSSELoad(SDValue Op, SDValue Pred,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &InChain,
                                          SDValue &OutChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    InChain = N.getOperand(0).getValue(1);
    if (ISD::isNON_EXTLoad(InChain.getNode()) &&
        InChain.getValue(0).hasOneUse() &&
        N.hasOneUse() &&
        IsLegalAndProfitableToFold(N.getNode(), Pred.getNode(), Op.getNode())) {
      LoadSDNode *LD = cast<LoadSDNode>(InChain);
      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      OutChain = LD->getChain();
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse()) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    OutChain = LD->getChain();
    InChain = SDValue(LD, 1);
    return true;
  }
  return false;
}
/// SelectLEAAddr - It calls SelectAddr and determines if the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue Op, SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert(T == AM.Segment);
  AM.Segment = Copy;

  MVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base.Reg.getNode())
      Complexity = 1;
    else
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or
  // with a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // into a LEA. This is determined with some experimentation but is by no
  // means optimal (especially for code size consideration). LEA is nice
  // because of its three-address nature. Tweak the cost function again when
  // we can run convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  if (Complexity > 2) {
    SDValue Segment;
    getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
    return true;
  }
  return false;
}
bool X86DAGToDAGISel::TryFoldLoad(SDValue P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (ISD::isNON_EXTLoad(N.getNode()) &&
      N.hasOneUse() &&
      IsLegalAndProfitableToFold(N.getNode(), P.getNode(), P.getNode()))
    return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp, Segment);
  return false;
}
/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  MachineFunction *MF = CurBB->getParent();
  unsigned GlobalBaseReg = TM.getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}
static SDNode *FindCallStartFromCall(SDNode *Node) {
  if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
  assert(Node->getOperand(0).getValueType() == MVT::Other &&
         "Node doesn't have a token chain argument!");
  return FindCallStartFromCall(Node->getOperand(0).getNode());
}
/// getTruncateTo8Bit - return an SDNode that implements a subreg based
/// truncate of the specified operand to i8. This can be done with tablegen,
/// except that this code uses MVT::Flag in a tricky way that happens to
/// improve scheduling in some cases.
SDNode *X86DAGToDAGISel::getTruncateTo8Bit(SDValue N0) {
  assert(!Subtarget->is64Bit() &&
         "getTruncateTo8Bit is only needed on x86-32!");
  SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
  DebugLoc dl = N0.getDebugLoc();

  // Ensure that the source register has an 8-bit subreg on 32-bit targets.
  unsigned Opc;
  MVT N0VT = N0.getValueType();
  switch (N0VT.getSimpleVT()) {
  default: assert(0 && "Unknown truncate!");
  case MVT::i16:
    Opc = X86::MOV16to16_;
    break;
  case MVT::i32:
    Opc = X86::MOV32to32_;
    break;
  }

  // The use of MVT::Flag here is not strictly accurate, but it helps
  // scheduling in some cases.
  N0 = SDValue(CurDAG->getTargetNode(Opc, dl, N0VT, MVT::Flag, N0), 0);
  return CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
                               MVT::i8, N0, SRIdx, N0.getValue(1));
}
SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(In1, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
  SDValue LSI = Node->getOperand(4);   // MemOperand
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, LSI, Chain };
  return CurDAG->getTargetNode(Opc, Node->getDebugLoc(),
                               MVT::i32, MVT::i32, MVT::Other, Ops,
                               array_lengthof(Ops));
}
SDNode *X86DAGToDAGISel::Select(SDValue N) {
  SDNode *Node = N.getNode();
  MVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  DebugLoc dl = Node->getDebugLoc();

#ifndef NDEBUG
  DOUT << std::string(Indent, ' ') << "Selecting: ";
  DEBUG(Node->dump(CurDAG));
  DOUT << "\n";
  Indent += 2;
#endif

  if (Node->isMachineOpcode()) {
#ifndef NDEBUG
    DOUT << std::string(Indent-2, ' ') << "== ";
    DEBUG(Node->dump(CurDAG));
    DOUT << "\n";
    Indent -= 2;
#endif
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::ATOMOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMOR6432);
  case X86ISD::ATOMXOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMXOR6432);
  case X86ISD::ATOMADD64_DAG:
    return SelectAtomic64(Node, X86::ATOMADD6432);
  case X86ISD::ATOMSUB64_DAG:
    return SelectAtomic64(Node, X86::ATOMSUB6432);
  case X86ISD::ATOMNAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMNAND6432);
  case X86ISD::ATOMAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMAND6432);
  case X86ISD::ATOMSWAP64_DAG:
    return SelectAtomic64(Node, X86::ATOMSWAP6432);

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned)
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    else
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT()) {
    default: assert(0 && "Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }
    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative, so try folding the other operand too.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getTargetNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
                              array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
    }
    // Copy the low half of the result, if it is needed.
    if (!N.getValue(0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
      DOUT << "\n";
#endif
    }
    // Copy the high half of the result, if it is needed.
    if (!N.getValue(1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, MVT::i16,
                                               Result,
                                               CurDAG->getTargetConstant(8, MVT::i8)), 0);
        // Then truncate it down to i8.
        SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
        Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
                                               MVT::i8, Result, SRIdx), 0);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
      DOUT << "\n";
#endif
    }

#ifndef NDEBUG
    Indent -= 2;
#endif

    return NULL;
  }
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned)
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    else
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    unsigned LoReg, HiReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT()) {
    default: assert(0 && "Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  HiReg = X86::AH;
      ClrOpcode  = 0;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrOpcode  = X86::MOV16r0;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; HiReg = X86::EDX;
      ClrOpcode  = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; HiReg = X86::RDX;
      ClrOpcode  = X86::MOV64r0;
      SExtOpcode = X86::CQO;
      break;
    }
    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
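      // For example, for an unsigned i8 divide, "movzbw %cl, %ax" leaves
      // AH = 0, so the 8-bit DIV then sees a correct 16-bit dividend in AX
      // (%cl here is illustrative).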
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, dl, MVT::i16,
                                        MVT::Other, Ops,
                                        array_lengthof(Ops)), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, dl, MVT::i16, N0),0);
        Chain = CurDAG->getEntryNode();
      }
      Chain  = CurDAG->getCopyToReg(Chain, dl, X86::AX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getTargetNode(SExtOpcode, dl, MVT::Flag, InFlag),0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode = SDValue(CurDAG->getTargetNode(ClrOpcode, dl, NVT),
                                  0);
        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, HiReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getTargetNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
                              array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
    }
    // Copy the division (low) result, if it is needed.
    if (!N.getValue(0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
      DOUT << "\n";
#endif
    }
    // Copy the remainder (high) result, if it is needed.
    if (!N.getValue(1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, MVT::i16,
                                               Result,
                                               CurDAG->getTargetConstant(8, MVT::i8)),
                         0);
        // Then truncate it down to i8.
        SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
        Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
                                               MVT::i8, Result, SRIdx), 0);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
      DOUT << "\n";
#endif
    }

#ifndef NDEBUG
    Indent -= 2;
#endif

    return NULL;
  }
  case ISD::SIGN_EXTEND_INREG: {
    MVT SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
    if (SVT == MVT::i8 && !Subtarget->is64Bit()) {
      SDValue N0 = Node->getOperand(0);

      SDValue TruncOp = SDValue(getTruncateTo8Bit(N0), 0);
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unknown sign_extend_inreg!");
      case MVT::i16:
        Opc = X86::MOVSX16rr8;
        break;
      case MVT::i32:
        Opc = X86::MOVSX32rr8;
        break;
      }

      SDNode *ResNode = CurDAG->getTargetNode(Opc, dl, NVT, TruncOp);

#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(TruncOp.getNode()->dump(CurDAG));
      DOUT << "\n";
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(ResNode->dump(CurDAG));
      DOUT << "\n";
      Indent -= 2;
#endif
      return ResNode;
    }
    break;
  }
  case ISD::TRUNCATE: {
    if (NVT == MVT::i8 && !Subtarget->is64Bit()) {
      SDValue Input = Node->getOperand(0);
      SDNode *ResNode = getTruncateTo8Bit(Input);

#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(ResNode->dump(CurDAG));
      DOUT << "\n";
      Indent -= 2;
#endif
      return ResNode;
    }
    break;
  }
  case ISD::DECLARE: {
    // Handle DECLARE nodes here because the second operand may have been
    // wrapped in X86ISD::Wrapper.
    SDValue Chain = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
    SDValue N2 = Node->getOperand(2);
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N1);

    // FIXME: We need to handle this for VLAs.
    if (!FINode) {
      ReplaceUses(N.getValue(0), Chain);
      return NULL;
    }
    if (N2.getOpcode() == ISD::ADD &&
        N2.getOperand(0).getOpcode() == X86ISD::GlobalBaseReg)
      N2 = N2.getOperand(1);

    // If N2 is not Wrapper(descriptor) then the llvm.declare is mangled
    // somehow, just ignore it.
    if (N2.getOpcode() != X86ISD::Wrapper) {
      ReplaceUses(N.getValue(0), Chain);
      return NULL;
    }
    GlobalAddressSDNode *GVNode =
      dyn_cast<GlobalAddressSDNode>(N2.getOperand(0));
    if (GVNode == 0) {
      ReplaceUses(N.getValue(0), Chain);
      return NULL;
    }
    SDValue Tmp1 = CurDAG->getTargetFrameIndex(FINode->getIndex(),
                                               TLI.getPointerTy());
    SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GVNode->getGlobal(),
                                                  TLI.getPointerTy());
    SDValue Ops[] = { Tmp1, Tmp2, Chain };
    return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, dl,
                                 MVT::Other, Ops,
                                 array_lengthof(Ops));
  }
  }
  SDNode *ResNode = SelectCode(N);

#ifndef NDEBUG
  DOUT << std::string(Indent-2, ' ') << "=> ";
  if (ResNode == NULL || ResNode == N.getNode())
    DEBUG(N.getNode()->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DOUT << "\n";
  Indent -= 2;
#endif

  return ResNode;
}
bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}
/// createX86ISelDag - This pass converts a legalized DAG into an
/// X86-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, bool Fast) {
  return new X86DAGToDAGISel(TM, Fast);
}