//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to an X86 dag.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Streams.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

#include "llvm/Support/CommandLine.h"
static cl::opt<bool> AvoidDupAddrCompute("x86-avoid-dup-address", cl::Hidden);

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    struct {            // This is really a union, discriminated by BaseType!
      SDValue Reg;
      int FrameIndex;
    } Base;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    GlobalValue *GV;
    Constant *CP;
    const char *ES;
    int JT;
    unsigned Align;    // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), ES(0), JT(-1), Align(0), SymbolFlags(0) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1;
    }

    bool hasBaseOrIndexReg() const {
      return IndexReg.getNode() != 0 || Base.Reg.getNode() != 0;
    }

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base.Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base.Reg = Reg;
    }

    void dump() {
      cerr << "X86ISelAddressMode " << this << "\n";
      cerr << "Base.Reg ";
      if (Base.Reg.getNode() != 0) Base.Reg.getNode()->dump();
      else cerr << "nul";
      cerr << " Base.FrameIndex " << Base.FrameIndex << "\n";
      cerr << " Scale" << Scale << "\n";
      cerr << "IndexReg ";
      if (IndexReg.getNode() != 0) IndexReg.getNode()->dump();
      else cerr << "nul";
      cerr << " Disp " << Disp << "\n";
      cerr << "GV "; if (GV) GV->dump(); else cerr << "nul";
      cerr << " CP "; if (CP) CP->dump(); else cerr << "nul";
      cerr << "\n";
      cerr << "ES "; if (ES) cerr << ES; else cerr << "nul";
      cerr << " JT" << JT << " Align" << Align << "\n";
    }
  };

  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class VISIBILITY_HIDDEN X86DAGToDAGISel : public SelectionDAGISel {
    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// CurBB - Current BB being isel'd.
    ///
    MachineBasicBlock *CurBB;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        X86Lowering(*tm.getTargetLowering()),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    /// InstructionSelect - This callback is invoked by
    /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
    virtual void InstructionSelect();

    virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);

    bool IsLegalAndProfitableToFold(SDNode *N, SDNode *U, SDNode *Root) const;

    // Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    SDNode *Select(SDValue N);
    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);

    bool MatchSegmentBaseAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchLoad(SDValue N, X86ISelAddressMode &AM);
    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM,
                      unsigned Depth = 0);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool SelectLEAAddr(SDValue Op, SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp);
    bool SelectTLSADDRAddr(SDValue Op, SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp);
    bool SelectScalarSSELoad(SDValue Op, SDValue Pred,
                             SDValue N, SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &InChain, SDValue &OutChain);
    bool TryFoldLoad(SDValue P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);
    void PreprocessForRMW();
    void PreprocessForFPConvert();

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                              char ConstraintCode,
                                              std::vector<SDValue> &OutOps);

    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp, SDValue &Segment) {
      Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
        AM.Base.Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES)
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      else if (AM.JT != -1)
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }

    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI16Imm - Return a target constant with the specified value, of type
    /// i16.
    inline SDValue getI16Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i16);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() {
      return getTargetMachine().getInstrInfo();
    }

#ifndef NDEBUG
    unsigned Indent;
#endif
  };
}

bool X86DAGToDAGISel::IsLegalAndProfitableToFold(SDNode *N, SDNode *U,
                                                 SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (U == Root)
    switch (U->getOpcode()) {
    default: break;
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In case where the increment is 1, then
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl    %gs:0, %eax
      // leal    i@NTPOFF(%eax), %eax
      // instead of
      // movl    $i@NTPOFF, %eax
      // addl    %gs:0, %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }

  // Proceed to 'generic' cycle finder code
  return SelectionDAGISel::IsLegalAndProfitableToFold(N, U, Root);
}

/// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain operand
/// and move load below the TokenFactor. Replace store's chain operand with
/// load's chain result.
static void MoveBelowTokenFactor(SelectionDAG *CurDAG, SDValue Load,
                                 SDValue Store, SDValue TF) {
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = TF.getNode()->getNumOperands(); i != e; ++i)
    if (Load.getNode() == TF.getOperand(i).getNode())
      Ops.push_back(Load.getOperand(0));
    else
      Ops.push_back(TF.getOperand(i));
  CurDAG->UpdateNodeOperands(TF, &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
  CurDAG->UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
                             Store.getOperand(2), Store.getOperand(3));
}

/// isRMWLoad - Return true if N is a load that's part of RMW sub-DAG.
///
static bool isRMWLoad(SDValue N, SDValue Chain, SDValue Address,
                      SDValue &Load) {
  if (N.getOpcode() == ISD::BIT_CONVERT)
    N = N.getOperand(0);

  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD || LD->isVolatile())
    return false;
  if (LD->getAddressingMode() != ISD::UNINDEXED)
    return false;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  if (ExtType != ISD::NON_EXTLOAD && ExtType != ISD::EXTLOAD)
    return false;

  if (N.hasOneUse() &&
      N.getOperand(1) == Address &&
      N.getNode()->isOperandOf(Chain.getNode())) {
    Load = N;
    return true;
  }
  return false;
}

/// MoveBelowCallSeqStart - Replace CALLSEQ_START operand with load's chain
/// operand and move load below the call's chain operand.
static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
                                  SDValue Call, SDValue CallSeqStart) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = CallSeqStart.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected CallSeqStart chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = CallSeqStart.getNumOperands(); i != e; ++i)
    Ops.push_back(CallSeqStart.getOperand(i));
  CurDAG->UpdateNodeOperands(CallSeqStart, &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size());
}

/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain) {
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()))
    return true;
  return false;
}

/// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
/// This is only run if not in -O0 mode.
/// This allows the instruction selector to pick more read-modify-write
/// instructions. This is a common case:
///
///       [Load chain]
///           |
///         [Load]
///         /    \
///  [TokenFactor] [Op]
///         \    /
///        [Store]
///
/// The fact the store's chain operand != load's chain will prevent the
/// (store (op (load))) instruction from being selected. We can transform it
/// by moving the load below the TokenFactor, so the store's chain operand
/// becomes the load's chain result.
void X86DAGToDAGISel::PreprocessForRMW() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {
    if (I->getOpcode() == X86ISD::CALL) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      SDValue Chain = I->getOperand(0);
      SDValue Load  = I->getOperand(1);
      if (!isCalleeLoad(Load, Chain))
        continue;
      MoveBelowCallSeqStart(CurDAG, Load, SDValue(I, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    if (!ISD::isNON_TRUNCStore(I))
      continue;
    SDValue Chain = I->getOperand(0);

    if (Chain.getNode()->getOpcode() != ISD::TokenFactor)
      continue;

    SDValue N1 = I->getOperand(1);
    SDValue N2 = I->getOperand(2);
    if ((N1.getValueType().isFloatingPoint() &&
         !N1.getValueType().isVector()) ||
        !N1.hasOneUse())
      continue;

    bool RModW = false;
    SDValue Load;
    unsigned Opcode = N1.getNode()->getOpcode();
    switch (Opcode) {
    case ISD::ADD:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::VECTOR_SHUFFLE: {
      SDValue N10 = N1.getOperand(0);
      SDValue N11 = N1.getOperand(1);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      if (!RModW)
        RModW = isRMWLoad(N11, Chain, N2, Load);
      break;
    }
    case ISD::SUB:
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
    case ISD::SUBC:
    case ISD::SUBE:
    case X86ISD::SHLD:
    case X86ISD::SHRD: {
      SDValue N10 = N1.getOperand(0);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      break;
    }
    }

    if (RModW) {
      MoveBelowTokenFactor(CurDAG, Load, SDValue(I, 0), Chain);
      ++NumLoadMoved;
    }
  }
}

/// PreprocessForFPConvert - Walk over the dag lowering fpround and fpextend
/// nodes that target the FP stack to be store and load to the stack. This is a
/// gross hack. We would like to simply mark these as being illegal, but when
/// we do that, legalize produces these when it expands calls, then expands
/// these in the same legalize pass. We would like dag combine to be able to
/// hack on these between the call expansion and the node legalization. As such
/// this pass basically does "really late" legalization of these inline with
/// the X86 isel pass.
void X86DAGToDAGISel::PreprocessForFPConvert() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    MVT SrcVT = N->getOperand(0).getValueType();
    MVT DstVT = N->getValueType(0);
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    MVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, NULL, 0, MemVT);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        NULL, 0, MemVT);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
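
// For example, rounding an x87 f80 value down to an SSE f32 goes through
// memory above: a truncating f32 store of the f80 to a fresh stack slot,
// followed by a plain f32 reload whose result can live in an SSE register.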

/// InstructionSelect - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void X86DAGToDAGISel::InstructionSelect() {
  CurBB = BB;  // BB can change as result of isel.
  const Function *F = CurDAG->getMachineFunction().getFunction();
  OptForSize = F->hasFnAttr(Attribute::OptimizeForSize);

  if (OptLevel != CodeGenOpt::None)
    PreprocessForRMW();

  // FIXME: This should only happen when not compiled with -O0.
  PreprocessForFPConvert();

  // Codegen the basic block.
#ifndef NDEBUG
  DOUT << "===== Instruction selection begins:\n";
  Indent = 0;
#endif
  SelectRoot(*CurDAG);
#ifndef NDEBUG
  DOUT << "===== Instruction selection ends:\n";
#endif

  CurDAG->RemoveDeadNodes();
}

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing())
    BuildMI(BB, DebugLoc::getUnknownLoc(),
            TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
}

void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
  // If this is main, emit special code for main.
  MachineBasicBlock *BB = MF.begin();
  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}

bool X86DAGToDAGISel::MatchSegmentBaseAddress(SDValue N,
                                              X86ISelAddressMode &AM) {
  assert(N.getOpcode() == X86ISD::SegmentBaseAddress);
  SDValue Segment = N.getOperand(0);

  if (AM.Segment.getNode() == 0) {
    AM.Segment = Segment;
    return false;
  }

  return true;
}

bool X86DAGToDAGISel::MatchLoad(SDValue N, X86ISelAddressMode &AM) {
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
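  // (In other words, "movl %gs:0, %eax" yields the TLS block's own linear
  // address, which is why a load based on the segment-base node can fold.)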

  SDValue Address = N.getOperand(1);
  if (Address.getOpcode() == X86ISD::SegmentBaseAddress &&
      !MatchSegmentBaseAddress(Address, AM))
    return false;

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode. These wrap things that will resolve down into a
/// symbol reference. If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);

  // Handle X86-64 rip-relative addresses. We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      TM.getCodeModel() == CodeModel::Small &&
      // Base and index reg must be 0 in order to use %rip as base and lowering
      // must allow RIP.
      !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {

    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      int64_t Offset = AM.Disp + G->getOffset();
      if (!isInt32(Offset)) return true;
      AM.GV = G->getGlobal();
      AM.Disp = Offset;
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      int64_t Offset = AM.Disp + CP->getOffset();
      if (!isInt32(Offset)) return true;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp = Offset;
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else {
      JumpTableSDNode *J = cast<JumpTableSDNode>(N0);
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    }

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -static -mcmodel=small mode. In 64-bit
  // mode, this results in a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      (TM.getCodeModel() == CodeModel::Small &&
       TM.getRelocationModel() == Reloc::Static)) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else {
      JumpTableSDNode *J = cast<JumpTableSDNode>(N0);
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    }
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
                                   unsigned Depth) {
  bool is64Bit = Subtarget->is64Bit();
  DebugLoc dl = N.getDebugLoc();
  DOUT << "MatchAddress: "; DEBUG(AM.dump());
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N)) {
      int64_t Val = AM.Disp + Cst->getSExtValue();
      if (isInt32(Val)) {
        AM.Disp = Val;
        return false;
      }
    }
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!is64Bit || isInt32(AM.Disp + Val)) {
      AM.Disp += Val;
      return false;
    }
    break;
  }

  case X86ISD::SegmentBaseAddress:
    if (!MatchSegmentBaseAddress(N, AM))
      return false;
    break;

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoad(N, AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase
        && AM.Base.Reg.getNode() == 0) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
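        // e.g. (shl (add X, 12), 2) becomes IndexReg = X, Scale = 4, and the
        // displacement grows by 12 << 2 = 48.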
        if (ShVal.getNode()->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
            isa<ConstantSDNode>(ShVal.getNode()->getOperand(1))) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = AM.Disp + (AddVal->getSExtValue() << Val);
          if (!is64Bit || isInt32(Disp))
            AM.Disp = Disp;
          else
            AM.IndexReg = ShVal;
        } else {
          AM.IndexReg = ShVal;
        }
        return false;
      }
    }
    break;

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
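    // e.g. X*5 can be emitted as a single "lea (%reg,%reg,4)" instead of a
    // multiply, using X as both the base and the scaled index.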
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base.Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AM.Disp + AddVal->getSExtValue() *
                                      CN->getZExtValue();
            if (!is64Bit || isInt32(Disp))
              AM.Disp = Disp;
            else
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base.Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address, leaving the
    // index field unused, use -B as the index. This is a win if A has
    // multiple parts that can be folded into the address. Also, this saves
    // a mov if the base register has other uses, since it avoids a
    // two-address sub instruction; however, it costs an additional mov if
    // the index register has other uses.
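    // e.g. for (GV + 16) - X, the GV+16 part folds entirely into the
    // displacement, and the negated X takes the otherwise-unused index slot.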

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddress(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }
    int Cost = 0;
    SDValue RHS = N.getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base.Reg.getNode() &&
         !AM.Base.Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    if (Zero.getNode()->getNodeId() == -1 ||
        Zero.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), Zero.getNode());
      Zero.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    if (Neg.getNode()->getNodeId() == -1 ||
        Neg.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), Neg.getNode());
      Neg.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    return false;
  }

  case ISD::ADD: {
    X86ISelAddressMode Backup = AM;
    if (!MatchAddress(N.getNode()->getOperand(0), AM, Depth+1) &&
        !MatchAddress(N.getNode()->getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;
    if (!MatchAddress(N.getNode()->getOperand(1), AM, Depth+1) &&
        !MatchAddress(N.getNode()->getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base.Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      AM.Base.Reg = N.getNode()->getOperand(0);
      AM.IndexReg = N.getNode()->getOperand(1);
      AM.Scale = 1;
      return false;
    }
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      X86ISelAddressMode Backup = AM;
      uint64_t Offset = CN->getSExtValue();
      // Start with the LHS as an addr mode.
      if (!MatchAddress(N.getOperand(0), AM, Depth+1) &&
          // Address could not have picked a GV address for the displacement.
          AM.GV == NULL &&
          // On x86-64, the resultant disp must fit in 32-bits.
          (!is64Bit || isInt32(AM.Disp + Offset)) &&
          // Check to see if the LHS & C is zero.
          CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
        AM.Disp += Offset;
        return false;
      }
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    SDValue Shift = N.getOperand(0);
    if (Shift.getNumOperands() != 2) break;

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue X = Shift.getOperand(0);
    ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
    if (!C1 || !C2) break;

    // Handle "(X >> (8-C1)) & C2" as "(X >> 8) & 0xff)" if safe. This
    // allows us to convert the shift and and into an h-register extract and
    // a scaled index.
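    // e.g. (X >> 5) & 0x7f8 is rewritten as ((X >> 8) & 0xff) << 3, so the
    // masked byte can come from an h-register and be scaled by 8.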
    if (Shift.getOpcode() == ISD::SRL && Shift.hasOneUse()) {
      unsigned ScaleLog = 8 - C1->getZExtValue();
      if (ScaleLog > 0 && ScaleLog < 4 &&
          C2->getZExtValue() == (UINT64_C(0xff) << ScaleLog)) {
        SDValue Eight = CurDAG->getConstant(8, MVT::i8);
        SDValue Mask = CurDAG->getConstant(0xff, N.getValueType());
        SDValue Srl = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                      X, Eight);
        SDValue And = CurDAG->getNode(ISD::AND, dl, N.getValueType(),
                                      Srl, Mask);
        SDValue ShlCount = CurDAG->getConstant(ScaleLog, MVT::i8);
        SDValue Shl = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
                                      And, ShlCount);

        // Insert the new nodes into the topological ordering.
        if (Eight.getNode()->getNodeId() == -1 ||
            Eight.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), Eight.getNode());
          Eight.getNode()->setNodeId(X.getNode()->getNodeId());
        }
        if (Mask.getNode()->getNodeId() == -1 ||
            Mask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), Mask.getNode());
          Mask.getNode()->setNodeId(X.getNode()->getNodeId());
        }
        if (Srl.getNode()->getNodeId() == -1 ||
            Srl.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
          CurDAG->RepositionNode(Shift.getNode(), Srl.getNode());
          Srl.getNode()->setNodeId(Shift.getNode()->getNodeId());
        }
        if (And.getNode()->getNodeId() == -1 ||
            And.getNode()->getNodeId() > N.getNode()->getNodeId()) {
          CurDAG->RepositionNode(N.getNode(), And.getNode());
          And.getNode()->setNodeId(N.getNode()->getNodeId());
        }
        if (ShlCount.getNode()->getNodeId() == -1 ||
            ShlCount.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), ShlCount.getNode());
          ShlCount.getNode()->setNodeId(N.getNode()->getNodeId());
        }
        if (Shl.getNode()->getNodeId() == -1 ||
            Shl.getNode()->getNodeId() > N.getNode()->getNodeId()) {
          CurDAG->RepositionNode(N.getNode(), Shl.getNode());
          Shl.getNode()->setNodeId(N.getNode()->getNodeId());
        }
        CurDAG->ReplaceAllUsesWith(N, Shl);
        AM.IndexReg = And;
        AM.Scale = (1 << ScaleLog);
        return false;
      }
    }

    // Handle "(X << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
    // allows us to fold the shift into this addressing mode.
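    // e.g. (X << 2) & 0x3fc becomes (X & 0xff) << 2, i.e. index = X & 0xff
    // with scale 4.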
    if (Shift.getOpcode() != ISD::SHL) break;

    // Not likely to be profitable if either the AND or SHIFT node has more
    // than one use (unless all uses are for address computation). Besides,
    // isel mechanism requires their node ids to be reused.
    if (!N.hasOneUse() || !Shift.hasOneUse())
      break;

    // Verify that the shift amount is something we can fold.
    unsigned ShiftCst = C1->getZExtValue();
    if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
      break;

    // Get the new AND mask, this folds to a constant.
    SDValue NewANDMask = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                         SDValue(C2, 0), SDValue(C1, 0));
    SDValue NewAND = CurDAG->getNode(ISD::AND, dl, N.getValueType(), X,
                                     NewANDMask);
    SDValue NewSHIFT = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
                                       NewAND, SDValue(C1, 0));

    // Insert the new nodes into the topological ordering.
    if (C1->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), C1);
      C1->setNodeId(X.getNode()->getNodeId());
    }
    if (NewANDMask.getNode()->getNodeId() == -1 ||
        NewANDMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), NewANDMask.getNode());
      NewANDMask.getNode()->setNodeId(X.getNode()->getNodeId());
    }
    if (NewAND.getNode()->getNodeId() == -1 ||
        NewAND.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
      CurDAG->RepositionNode(Shift.getNode(), NewAND.getNode());
      NewAND.getNode()->setNodeId(Shift.getNode()->getNodeId());
    }
    if (NewSHIFT.getNode()->getNodeId() == -1 ||
        NewSHIFT.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), NewSHIFT.getNode());
      NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
    }

    CurDAG->ReplaceAllUsesWith(N, NewSHIFT);

    AM.Scale = 1 << ShiftCst;
    AM.IndexReg = NewAND;
    return false;
  }
  }

  return MatchAddressBase(N, AM);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base.Reg = N;
  return false;
}

/// SelectAddr - Returns true if it is able to pattern-match an addressing
/// mode. It returns the operands which make up the maximal addressing mode
/// it can match by reference.
bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;
  bool Done = false;
  if (AvoidDupAddrCompute && !N.hasOneUse()) {
    unsigned Opcode = N.getOpcode();
    if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex &&
        Opcode != X86ISD::Wrapper && Opcode != X86ISD::WrapperRIP) {
      // If we are able to fold N into addressing mode, then we'll allow it even
      // if N has multiple uses. In general, addressing computation is used as
      // addresses by all of its uses. But watch out for CopyToReg uses, that
      // means the address computation is liveout. It will be computed by a LEA
      // so we want to avoid computing the address twice.
      for (SDNode::use_iterator UI = N.getNode()->use_begin(),
             UE = N.getNode()->use_end(); UI != UE; ++UI) {
        if (UI->getOpcode() == ISD::CopyToReg) {
          MatchAddressBase(N, AM);
          Done = true;
          break;
        }
      }
    }
  }

  if (!Done && MatchAddress(N, AM))
    return false;

  MVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base.Reg.getNode())
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
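/// e.g. (v4f32 (scalar_to_vector (loadf32 addr))) can be selected as a
/// movss-style load from addr, since the upper lanes are undef here anyway.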
bool X86DAGToDAGISel::SelectScalarSSELoad(SDValue Op, SDValue Pred,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &InChain,
                                          SDValue &OutChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    InChain = N.getOperand(0).getValue(1);
    if (ISD::isNON_EXTLoad(InChain.getNode()) &&
        InChain.getValue(0).hasOneUse() &&
        N.hasOneUse() &&
        IsLegalAndProfitableToFold(N.getNode(), Pred.getNode(), Op.getNode())) {
      LoadSDNode *LD = cast<LoadSDNode>(InChain);
      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      OutChain = LD->getChain();
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse()) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    OutChain = LD->getChain();
    InChain = SDValue(LD, 1);
    return true;
  }
  return false;
}

/// SelectLEAAddr - It calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost-effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue Op, SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert (T == AM.Segment);
  AM.Segment = Copy;

  MVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (AM.Base.Reg.getNode())
      Complexity = 1;
    else
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  } else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // into a LEA. This is determined with some experimentation but is by no
  // means optimal (especially for code size consideration). LEA is nice
  // because of its three-address nature. Tweak the cost function again when
  // we can run convertToThreeAddress() at register allocation time.
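  // e.g. "addl $sym, %eax" destroys %eax (two-address), whereas
  // "leal sym(%eax), %ecx" computes the same sum into a fresh register.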
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  if (Complexity > 2) {
    SDValue Segment;
    getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
    return true;
  }
  return false;
}

/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue Op, SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp) {
  assert(Op.getOpcode() == X86ISD::TLSADDR);
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base.Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  SDValue Segment;
  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

bool X86DAGToDAGISel::TryFoldLoad(SDValue P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (ISD::isNON_EXTLoad(N.getNode()) &&
      N.hasOneUse() &&
      IsLegalAndProfitableToFold(N.getNode(), P.getNode(), P.getNode()))
    return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp, Segment);
  return false;
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  MachineFunction *MF = CurBB->getParent();
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}

static SDNode *FindCallStartFromCall(SDNode *Node) {
  if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
  assert(Node->getOperand(0).getValueType() == MVT::Other &&
         "Node doesn't have a token chain argument!");
  return FindCallStartFromCall(Node->getOperand(0).getNode());
}

SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(In1, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
  SDValue LSI = Node->getOperand(4);    // MemOperand
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, LSI, Chain};
  return CurDAG->getTargetNode(Opc, Node->getDebugLoc(),
                               MVT::i32, MVT::i32, MVT::Other, Ops,
                               array_lengthof(Ops));
}

SDNode *X86DAGToDAGISel::Select(SDValue N) {
  SDNode *Node = N.getNode();
  MVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  DebugLoc dl = Node->getDebugLoc();

#ifndef NDEBUG
  DOUT << std::string(Indent, ' ') << "Selecting: ";
  DEBUG(Node->dump(CurDAG));
  DOUT << "\n";
  Indent += 2;
#endif

  if (Node->isMachineOpcode()) {
#ifndef NDEBUG
    DOUT << std::string(Indent-2, ' ') << "== ";
    DEBUG(Node->dump(CurDAG));
    DOUT << "\n";
    Indent -= 2;
#endif
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::ATOMOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMOR6432);
  case X86ISD::ATOMXOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMXOR6432);
  case X86ISD::ATOMADD64_DAG:
    return SelectAtomic64(Node, X86::ATOMADD6432);
  case X86ISD::ATOMSUB64_DAG:
    return SelectAtomic64(Node, X86::ATOMSUB6432);
  case X86ISD::ATOMNAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMNAND6432);
  case X86ISD::ATOMAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMAND6432);
  case X86ISD::ATOMSWAP64_DAG:
    return SelectAtomic64(Node, X86::ATOMSWAP6432);

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned)
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    else
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }

    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT()) {
    default: assert(0 && "Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }
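    // (The x86 one-operand multiply always writes the low half of the result
    // to AL/AX/EAX/RAX and the high half to AH/DX/EDX/RDX.)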

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative; try folding the other operand if that failed.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getTargetNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
                              array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
    }

    // Copy the low half of the result, if it is needed.
    if (!N.getValue(0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
      DOUT << "\n";
#endif
    }

    // Copy the high half of the result, if it is needed.
    if (!N.getValue(1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
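        // (Instructions with a REX prefix cannot encode AH/BH/CH/DH, so we
        // read AX and extract the high byte with a shift instead.)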
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, MVT::i16,
                                               Result,
                                               CurDAG->getTargetConstant(8, MVT::i8)), 0);
        // Then truncate it down to i8.
        SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, MVT::i32);
        Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
                                               MVT::i8, Result, SRIdx), 0);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
      DOUT << "\n";
#endif
    }

#ifndef NDEBUG
    Indent -= 2;
#endif

    return NULL;
  }

  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned)
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    else
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }

    unsigned LoReg, HiReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT()) {
    default: assert(0 && "Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  HiReg = X86::AH;
      ClrOpcode  = 0;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrOpcode  = X86::MOV16r0;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; HiReg = X86::EDX;
      ClrOpcode  = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; HiReg = X86::RDX;
      ClrOpcode  = X86::MOV64r0;
      SExtOpcode = X86::CQO;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
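      // (The 8-bit divide takes its dividend from AX: the quotient lands in
      // AL and the remainder in AH, so zero-extending into AX is sufficient.)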
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, dl, MVT::i16,
                                        MVT::Other, Ops,
                                        array_lengthof(Ops)), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, dl, MVT::i16, N0),0);
        Chain = CurDAG->getEntryNode();
      }
      Chain = CurDAG->getCopyToReg(Chain, dl, X86::AX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getTargetNode(SExtOpcode, dl, MVT::Flag, InFlag),0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode = SDValue(CurDAG->getTargetNode(ClrOpcode, dl, NVT),
                                  0);
        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, HiReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getTargetNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
                              array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
    }

    // Copy the division (low) result, if it is needed.
    if (!N.getValue(0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
      DOUT << "\n";
#endif
    }

    // Copy the remainder (high) result, if it is needed.
    if (!N.getValue(1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, MVT::i16,
                                               Result,
                                               CurDAG->getTargetConstant(8, MVT::i8)),
                         0);
        // Then truncate it down to i8.
        SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, MVT::i32);
        Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
                                               MVT::i8, Result, SRIdx), 0);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
      DOUT << "\n";
#endif
    }

#ifndef NDEBUG
    Indent -= 2;
#endif

    return NULL;
  }

  case ISD::DECLARE: {
    // Handle DECLARE nodes here because the second operand may have been
    // wrapped in X86ISD::Wrapper.
    SDValue Chain = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
    SDValue N2 = Node->getOperand(2);
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N1);

    // FIXME: We need to handle this for VLAs.
    if (!FINode) {
      ReplaceUses(N.getValue(0), Chain);
      return NULL;
    }

    if (N2.getOpcode() == ISD::ADD &&
        N2.getOperand(0).getOpcode() == X86ISD::GlobalBaseReg)
      N2 = N2.getOperand(1);

    // If N2 is not Wrapper(descriptor) then the llvm.declare is mangled
    // somehow, just ignore it.
    if (N2.getOpcode() != X86ISD::Wrapper &&
        N2.getOpcode() != X86ISD::WrapperRIP) {
      ReplaceUses(N.getValue(0), Chain);
      return NULL;
    }
    GlobalAddressSDNode *GVNode =
      dyn_cast<GlobalAddressSDNode>(N2.getOperand(0));
    if (GVNode == 0) {
      ReplaceUses(N.getValue(0), Chain);
      return NULL;
    }
    SDValue Tmp1 = CurDAG->getTargetFrameIndex(FINode->getIndex(),
                                               TLI.getPointerTy());
    SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GVNode->getGlobal(),
                                                  TLI.getPointerTy());
    SDValue Ops[] = { Tmp1, Tmp2, Chain };
    return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, dl,
                                 MVT::Other, Ops,
                                 array_lengthof(Ops));
  }
  }

  SDNode *ResNode = SelectCode(N);

#ifndef NDEBUG
  DOUT << std::string(Indent-2, ' ') << "=> ";
  if (ResNode == NULL || ResNode == N.getNode())
    DEBUG(N.getNode()->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DOUT << "\n";
  Indent -= 2;
#endif

  return ResNode;
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into an
/// X86-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     llvm::CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}