//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum { RegBase, FrameIndexBase } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;             // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
        SymbolFlags(X86II::MO_NO_FLAG) {}

    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
    }

    bool hasBaseOrIndexReg() const {
      return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
    }

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode() != 0)
        Base_Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
             << " Scale" << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode() != 0)
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n';
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
  };
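
  // For reference, the fields above model the general x86 memory operand form
  //   Segment:[Base_Reg + Scale*IndexReg + Disp]
  // so, for example, an access such as
  //   movl 16(%rbx,%rcx,4), %eax
  // matches as Base_Reg = %rbx, Scale = 4, IndexReg = %rcx, Disp = 16.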
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel : public SelectionDAGISel {
    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    const X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        X86Lowering(*tm.getTargetLowering()),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}
    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    virtual void EmitFunctionEntryCode();

    virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;

    virtual void PreprocessISelDAG();

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
    // sign extended field.
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }
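
    // For example, 0xFFFFFFFF80000000 passes this check, since sign-extending
    // its low 32 bits (-2147483648) reproduces the full 64-bit value, while
    // 0x0000000080000000 (2147483648) fails and therefore cannot be encoded
    // as a sign-extended 32-bit immediate.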
    // Include the pieces autogenerated from the target description.
    #include "X86GenDAGISel.inc"
  private:
    SDNode *Select(SDNode *N);
    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
    SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT);
    SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT);

    bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool SelectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool SelectScalarSSELoad(SDNode *Root, SDValue N,
                             SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &NodeWithChain);

    bool TryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                              char ConstraintCode,
                                              std::vector<SDValue> &OutOps);

    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp, SDValue &Segment) {
      Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) :
        AM.Base_Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES)
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      else if (AM.JT != -1)
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      else if (AM.BlockAddr)
        Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32,
                                       true, AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }
    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() {
      return getTargetMachine().getInstrInfo();
    }
  };
}
bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD: case X86ISD::SUB: case X86ISD::AND: case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD: case ISD::ADDC: case ISD::ADDE:
    case ISD::AND: case ISD::OR: case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is a 8-bit immediate we should fold the immediate
      // instead. This reduces code size.
      // e.g.
      //   movl 4(%esp), %eax
      //   addl $4, %eax
      // vs.
      //   movl $4, %eax
      //   addl 4(%esp), %eax
      // The former is 2 bytes shorter. In case where the increment is 1, then
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      //   movl %gs:0, %eax
      //   leal i@NTPOFF(%eax), %eax
      // instead of
      //   movl $i@NTPOFF, %eax
      //   leal %gs:0(%eax), %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}
/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], Ops.size());
}
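
// The net effect, roughly: before the transformation the callee-address load
// sits above the call sequence,
//   [Load] -> ... -> [CALLSEQ_START] -> [CALL]
// and afterwards the load hangs directly off the call's incoming chain,
//   ... -> [CALLSEQ_START] -> [Load] -> [CALL]
// which is what later allows the load to be folded into the call instruction.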
/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}
void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize);

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++; // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        (N->getOpcode() == X86ISD::CALL ||
         N->getOpcode() == X86ISD::TC_RETURN)) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }
    // Lower fpround and fpextend nodes that target the FP stack to be store and
    // load to the stack. This is a gross hack. We would like to simply mark
    // these as being illegal, but when we do that, legalize produces these when
    // it expands calls, then expands these in the same legalize pass. We would
    // like dag combine to be able to hack on these between the call expansion
    // and the node legalization. As such this pass basically does "really
    // late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    EVT SrcVT = N->getOperand(0).getValueType();
    EVT DstVT = N->getValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    EVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
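
// As an illustration, an f32 -> f64 FP_EXTEND whose source is in an SSE
// register but whose result is consumed on the x87 stack becomes, in effect:
//   movss %xmm0, -8(%rsp)   ; store the f32 through the stack temporary
//   flds  -8(%rsp)          ; extending load onto the FP stack
// with MemVT = f32 (the SSE-side type) selected by the logic above.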
/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing()) {
    unsigned CallOp =
      Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
    BuildMI(BB, DebugLoc(),
            TII->get(CallOp)).addExternalSymbol("__main");
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}
static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}
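
// Illustrative worked case: if the explicit displacement is 0x40000000 and
// the frame object later resolves to another 0x40000000 bytes of offset, the
// sum 0x80000000 no longer fits the signed 32-bit displacement field; capping
// the explicit part at 31 bits (isInt<31>) leaves headroom for that resolution.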
bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}
bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
        Subtarget->isTargetELF())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}
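
// Concretely, on x86-32 ELF the local-exec TLS sequence
//   movl %gs:0, %eax
//   leal i@NTPOFF(%eax), %eax
// works precisely because %gs:0 holds the address of the thread block itself,
// which is what lets the zero-displacement load above fold into a segment.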
/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode. These wrap things that will resolve down into a
/// symbol reference. If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses. We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    // Base and index reg must be 0 in order to use %rip as base.
    if (AM.hasBaseOrIndexReg())
      return true;
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -mcmodel=small mode. In 64-bit
  // mode, this only applies to a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      M == CodeModel::Small || M == CodeModel::Kernel) {
    assert(N.getOpcode() != X86ISD::WrapperRIP &&
           "RIP-relative addressing already handled");
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }
    return false;
  }

  return true;
}
/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0 &&
      AM.IndexReg.getNode() == 0 &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}
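
// Encoding note for the first post-processing step above: the index-only form
//   leal (,%reg,2), %dst
// needs a SIB byte plus a mandatory 32-bit displacement, whereas
//   leal (%reg,%reg), %dst
// encodes with no displacement at all, so the rewrite saves four bytes.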
// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// function is used.
static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    DAG.RepositionNode(Pos.getNode(), N.getNode());
    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
  }
}
// Transform "(X >> (8-C1)) & C2" to "(X >> 8) & 0xff" if safe. This
// allows us to convert the shift and and into an h-register extract and
// a scaled index. Returns false if the simplification is performed.
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue Eight = DAG.getConstant(8, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, Eight);
  InsertDAGNode(DAG, N, Srl);
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, And);
  InsertDAGNode(DAG, N, ShlCount);
  InsertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}
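
// Worked example: with C1 == 2 the incoming pattern is (X >> 6) & 0x3fc.
// ScaleLog is 8 - 6 == 2 and the mask check 0x3fc == (0xff << 2) passes, so
// the address becomes ((X >> 8) & 0xff) with AM.Scale == 4, exactly the form
// an h-register extract (e.g. movzbl %ah, %ecx) plus a scaled index can cover.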
// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, NewAnd);
  InsertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}
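
// Worked example: (X << 1) & 0xff00 is rewritten to (X & 0x7f80) << 1, after
// which the left shift is absorbed as AM.Scale == 2 and the remaining
// (X & 0x7f80) value serves directly as the index register.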
// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %ecx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = CountLeadingZeros_64(Mask);
  unsigned MaskTZ = CountTrailingZeros_64(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that mask is a continuous run of bits.
  if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  MaskLZ -= (64 - X.getValueSizeInBits()) + ShiftAmt;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits =
      X.getValueSizeInBits() - X.getOperand(0).getValueSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
                                               MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(X, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  EVT VT = N.getValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X);
    InsertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  DebugLoc DL = N.getDebugLoc();
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewSRLAmt);
  InsertDAGNode(DAG, N, NewSRL);
  InsertDAGNode(DAG, N, NewSHLAmt);
  InsertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}
bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  DebugLoc dl = N.getDebugLoc();
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }
  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = AddVal->getSExtValue() << Val;
          if (!FoldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;

  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getValueSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (FoldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;
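
  // Worked example for the transform above: x*9 becomes x + x*8, i.e.
  // AM.Scale = 8 with base and index both set to x, which selection can
  // emit as a single leaq (%rax,%rax,8), %rcx instead of a multiply.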
  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address and
    // the index field with the index field unused, use -B as the index.
    // This is a win if A has multiple parts that can be folded into
    // the address. Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction, however
    // it costs an additional mov if the index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    InsertDAGNode(*CurDAG, N, Zero);
    InsertDAGNode(*CurDAG, N, Neg);
    return false;
  }
  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base_Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      N = Handle.getValue();
      AM.Base_Reg = N.getOperand(0);
      AM.IndexReg = N.getOperand(1);
      return false;
    }
    N = Handle.getValue();
    break;
  }
  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (CurDAG->isBaseWithConstantOffset(N)) {
      X86ISelAddressMode Backup = AM;
      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));

      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
        return false;
      AM = Backup;
    }
    break;
  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
    SDValue X = Shift.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getValueSizeInBits() > 64) break;

    if (!isa<ConstantSDNode>(N.getOperand(1)))
      break;
    uint64_t Mask = N.getConstantOperandVal(1);

    // Try to fold the mask and shift into an extract and scale.
    if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to fold the mask and shift directly into the scale.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
      return false;
    break;
  }
  }

  return MatchAddressBase(N, AM);
}
/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}
/// SelectAddr - Returns true if it is able to pattern match an addressing
/// mode. It returns the operands which make up the maximal addressing mode
/// it can match by reference.
///
/// Parent is the parent node of the addr operand that is being matched. It
/// is always a load, store, atomic node, or null. It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // This list of opcodes are all the nodes that have an "addr:$ptr" operand
      // that are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL) { // Fixme
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  }

  if (MatchAddress(N, AM))
    return false;

  EVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   chain output.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        PatternNodeWithChain.hasOneUse() &&
        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse() &&
      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    PatternNodeWithChain = SDValue(LD, 0);
    return true;
  }
  return false;
}
/// SelectLEAAddr - It calls SelectAddr and determines if the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert (T == AM.Segment);
  AM.Segment = Copy;

  EVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
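
// The Complexity heuristic above accepts LEA once enough address components
// are in play; e.g. base + index + displacement such as
//   leal 8(%rdi,%rsi), %eax
// replaces a two-instruction add sequence, while a lone leal(,%reg,2)
// stays rejected because addl %reg, %reg is cheaper.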
/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}
/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}
SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
                                           MVT::i32, MVT::i32, MVT::Other, Ops,
                                           array_lengthof(Ops));
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
}
// FIXME: Figure out some way to unify this with the 'or' and other code
// below.
SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  // Optimize common patterns for __sync_add_and_fetch and
  // __sync_sub_and_fetch where the result is not used. This allows us
  // to use "lock" version of add, sub, inc, dec instructions.
  // FIXME: Do not use special instructions but instead add the "lock"
  // prefix to the target node somehow. The extra information will then be
  // transferred to machine instruction and it denotes the prefix.
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  bool isInc = false, isDec = false, isSub = false, isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN && CN->getSExtValue() == (int32_t)CN->getSExtValue()) {
    isCN = true;
    int64_t CNVal = CN->getSExtValue();
    if (CNVal == 1)
      isInc = true;
    else if (CNVal == -1)
      isDec = true;
    else if (CNVal >= 0)
      Val = CurDAG->getTargetConstant(CNVal, NVT);
    else {
      isSub = true;
      Val = CurDAG->getTargetConstant(-CNVal, NVT);
    }
  } else if (Val.hasOneUse() &&
             Val.getOpcode() == ISD::SUB &&
             X86::isZeroNode(Val.getOperand(0))) {
    isSub = true;
    Val = Val.getOperand(1);
  }

  DebugLoc dl = Node->getDebugLoc();
  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    if (isInc)
      Opc = X86::LOCK_INC8m;
    else if (isDec)
      Opc = X86::LOCK_DEC8m;
    else if (isSub) {
      if (isCN)
        Opc = X86::LOCK_SUB8mi;
      else
        Opc = X86::LOCK_SUB8mr;
    } else {
      if (isCN)
        Opc = X86::LOCK_ADD8mi;
      else
        Opc = X86::LOCK_ADD8mr;
    }
    break;
  case MVT::i16:
    if (isInc)
      Opc = X86::LOCK_INC16m;
    else if (isDec)
      Opc = X86::LOCK_DEC16m;
    else if (isSub) {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB16mi8;
        else
          Opc = X86::LOCK_SUB16mi;
      } else
        Opc = X86::LOCK_SUB16mr;
    } else {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD16mi8;
        else
          Opc = X86::LOCK_ADD16mi;
      } else
        Opc = X86::LOCK_ADD16mr;
    }
    break;
  case MVT::i32:
    if (isInc)
      Opc = X86::LOCK_INC32m;
    else if (isDec)
      Opc = X86::LOCK_DEC32m;
    else if (isSub) {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB32mi8;
        else
          Opc = X86::LOCK_SUB32mi;
      } else
        Opc = X86::LOCK_SUB32mr;
    } else {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD32mi8;
        else
          Opc = X86::LOCK_ADD32mi;
      } else
        Opc = X86::LOCK_ADD32mr;
    }
    break;
  case MVT::i64:
    if (isInc)
      Opc = X86::LOCK_INC64m;
    else if (isDec)
      Opc = X86::LOCK_DEC64m;
    else if (isSub) {
      Opc = X86::LOCK_SUB64mr;
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB64mi8;
        else if (i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_SUB64mi32;
      }
    } else {
      Opc = X86::LOCK_ADD64mr;
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD64mi8;
        else if (i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_ADD64mi32;
      }
    }
    break;
  }

  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  if (isInc || isDec) {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  } else {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  }
}
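
// In practice this selects, e.g., an atomic add whose result is discarded:
//   __sync_fetch_and_add(&x, 1);
// into a single read-modify-write instruction such as
//   lock incl (%rdi)
// and __sync_fetch_and_sub(&x, 5) into lock subl $5, (%rdi).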
enum AtomicOpc {
  OR,
  AND,
  XOR,
  AtomicOpcEnd
};

enum AtomicSz {
  ConstantI8,
  I8,
  SextConstantI16,
  ConstantI16,
  I16,
  SextConstantI32,
  ConstantI32,
  I32,
  SextConstantI64,
  ConstantI64,
  I64,
  AtomicSzEnd
};

static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
  { X86::LOCK_OR8mi,    X86::LOCK_OR8mr,
    X86::LOCK_OR16mi8,  X86::LOCK_OR16mi,   X86::LOCK_OR16mr,
    X86::LOCK_OR32mi8,  X86::LOCK_OR32mi,   X86::LOCK_OR32mr,
    X86::LOCK_OR64mi8,  X86::LOCK_OR64mi32, X86::LOCK_OR64mr },
  { X86::LOCK_AND8mi,   X86::LOCK_AND8mr,
    X86::LOCK_AND16mi8, X86::LOCK_AND16mi,   X86::LOCK_AND16mr,
    X86::LOCK_AND32mi8, X86::LOCK_AND32mi,   X86::LOCK_AND32mr,
    X86::LOCK_AND64mi8, X86::LOCK_AND64mi32, X86::LOCK_AND64mr },
  { X86::LOCK_XOR8mi,   X86::LOCK_XOR8mr,
    X86::LOCK_XOR16mi8, X86::LOCK_XOR16mi,   X86::LOCK_XOR16mr,
    X86::LOCK_XOR32mi8, X86::LOCK_XOR32mi,   X86::LOCK_XOR32mr,
    X86::LOCK_XOR64mi8, X86::LOCK_XOR64mi32, X86::LOCK_XOR64mr }
};
SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  // Optimize common patterns for __sync_or_and_fetch and similar arith
  // operations where the result is not used. This allows us to use the "lock"
  // version of the arithmetic instruction.
  // FIXME: Same as for 'add' and 'sub', try to merge those down here.
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  // Which index into the table.
  AtomicOpc Op;
  switch (Node->getOpcode()) {
  case ISD::ATOMIC_LOAD_OR:
    Op = OR;
    break;
  case ISD::ATOMIC_LOAD_AND:
    Op = AND;
    break;
  case ISD::ATOMIC_LOAD_XOR:
    Op = XOR;
    break;
  default:
    return 0;
  }

  bool isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN && (int32_t)CN->getSExtValue() == CN->getSExtValue()) {
    isCN = true;
    Val = CurDAG->getTargetConstant(CN->getSExtValue(), NVT);
  }

  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    if (isCN)
      Opc = AtomicOpcTbl[Op][ConstantI8];
    else
      Opc = AtomicOpcTbl[Op][I8];
    break;
  case MVT::i16:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI16];
      else
        Opc = AtomicOpcTbl[Op][ConstantI16];
    } else
      Opc = AtomicOpcTbl[Op][I16];
    break;
  case MVT::i32:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI32];
      else
        Opc = AtomicOpcTbl[Op][ConstantI32];
    } else
      Opc = AtomicOpcTbl[Op][I32];
    break;
  case MVT::i64:
    Opc = AtomicOpcTbl[Op][I64];
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI64];
      else if (i64immSExt32(Val.getNode()))
        Opc = AtomicOpcTbl[Op][ConstantI64];
    }
    break;
  }

  assert(Opc != 0 && "Invalid arith lock transform!");

  DebugLoc dl = Node->getDebugLoc();
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
  SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
  cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
  SDValue RetVals[] = { Undef, Ret };
  return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
}
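
// For instance, __sync_fetch_and_or(&flags, 0x10) with the result unused
// reaches this path as ISD::ATOMIC_LOAD_OR and selects through
// AtomicOpcTbl[OR][SextConstantI32] to
//   lock orl $16, (%rdi)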
/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
/// any uses which require the SF or OF bits to be accurate.
static bool HasNoSignedComparisonUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); UI != UE; ++UI) {
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      return false;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
          X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
      // These comparisons don't treat the most significant bit specially.
      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
      case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
      case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
      case X86::CMOVA16rr: case X86::CMOVA16rm:
      case X86::CMOVA32rr: case X86::CMOVA32rm:
      case X86::CMOVA64rr: case X86::CMOVA64rm:
      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
      case X86::CMOVB16rr: case X86::CMOVB16rm:
      case X86::CMOVB32rr: case X86::CMOVB32rm:
      case X86::CMOVB64rr: case X86::CMOVB64rm:
      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
      case X86::CMOVE16rr: case X86::CMOVE16rm:
      case X86::CMOVE32rr: case X86::CMOVE32rm:
      case X86::CMOVE64rr: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
      case X86::CMOVP16rr: case X86::CMOVP16rm:
      case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.
      default: return false;
      }
    }
  }
  return true;
}
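
// This predicate is what permits narrowing a compare: for example,
// (cmp (and %eax, 0xff), 0) may be selected as "testb %al, %al" only when
// every flag consumer is an unsigned or equality condition, because the
// narrowed TEST derives SF from bit 7 rather than bit 31.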
/// isLoadIncOrDecStore - Check whether or not the chain ending in StoreNode
/// is suitable for doing the {load; increment or decrement; store} to modify
/// the corresponding value in memory.
static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
                                SDValue StoredVal, SelectionDAG *CurDAG,
                                LoadSDNode* &LoadNode, SDValue &InputChain) {

  // Is the value stored the result of a DEC or INC?
  if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;

  // Is the stored value result 0 of the INC/DEC (the value, not EFLAGS)?
  if (StoredVal.getResNo() != 0) return false;

  // Are there other uses of the loaded value than the inc or dec?
  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;

  // Is the store non-extending and non-indexed?
  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
    return false;

  SDValue Load = StoredVal->getOperand(0);
  // Is the stored value a non-extending and non-indexed load?
  if (!ISD::isNormalLoad(Load.getNode())) return false;

  // Return LoadNode by reference.
  LoadNode = cast<LoadSDNode>(Load);
  // Is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
  EVT LdVT = LoadNode->getMemoryVT();
  if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
      LdVT != MVT::i8)
    return false;

  // Is the store the only read of the loaded value?
  if (!Load.hasOneUse())
    return false;

  // Is the address of the store the same as the load?
  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
      LoadNode->getOffset() != StoreNode->getOffset())
    return false;

  // Check if the chain is produced by the load or is a TokenFactor with
  // the load output chain as an operand. Return InputChain by reference.
  SDValue Chain = StoreNode->getChain();

  bool ChainCheck = false;
  if (Chain == Load.getValue(1)) {
    ChainCheck = true;
    InputChain = LoadNode->getChain();
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    SmallVector<SDValue, 4> ChainOps;
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
      SDValue Op = Chain.getOperand(i);
      if (Op == Load.getValue(1)) {
        ChainCheck = true;
        continue;
      }

      // Make sure using Op as part of the chain would not cause a cycle here.
      // In theory, we could check whether the chain node is a predecessor of
      // the load. But that can be very expensive. Instead visit the uses and
      // make sure they all have smaller node id than the load.
      int LoadId = LoadNode->getNodeId();
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        if (UI.getUse().getResNo() != 0)
          continue;
        if (UI->getNodeId() > LoadId)
          return false;
      }

      ChainOps.push_back(Op);
    }

    // Make a new TokenFactor with all the other input chains except
    // for the load.
    InputChain = CurDAG->getNode(ISD::TokenFactor, Chain.getDebugLoc(),
                                 MVT::Other, &ChainOps[0], ChainOps.size());
  }
  if (!ChainCheck)
    return false;

  return true;
}
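
// A sketch of the DAG shape this recognizes (for "++*p" style code):
//
//   ld  = load p
//   val = X86ISD::INC ld      ; result 0 = value, result 1 = EFLAGS
//   st  = store val, p        ; st's chain is ld's output chain, or a
//                             ; TokenFactor containing it
//
// When it matches, the ISD::STORE case in Select() below rewrites the whole
// group into a single read-modify-write instruction such as INC32m.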
/// getFusedLdStOpcode - Get the appropriate X86 opcode for an in-memory
/// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC.
static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
  if (Opc == X86ISD::DEC) {
    if (LdVT == MVT::i64) return X86::DEC64m;
    if (LdVT == MVT::i32) return X86::DEC32m;
    if (LdVT == MVT::i16) return X86::DEC16m;
    if (LdVT == MVT::i8)  return X86::DEC8m;
  } else {
    assert(Opc == X86ISD::INC && "unrecognized opcode");
    if (LdVT == MVT::i64) return X86::INC64m;
    if (LdVT == MVT::i32) return X86::INC32m;
    if (LdVT == MVT::i16) return X86::INC16m;
    if (LdVT == MVT::i8)  return X86::INC8m;
  }
  llvm_unreachable("unrecognized size for LdVT");
}
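
// Example: for a 32-bit X86ISD::INC this returns X86::INC32m, the memory
// form "incl (mem)". The machine node built for it in the ISD::STORE case
// below takes the usual five x86 address operands (Base, Scale, Index, Disp,
// Segment) followed by the input chain.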
SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
  EVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  DebugLoc dl = Node->getDebugLoc();

  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();
  case X86ISD::ATOMOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMOR6432);
  case X86ISD::ATOMXOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMXOR6432);
  case X86ISD::ATOMADD64_DAG:
    return SelectAtomic64(Node, X86::ATOMADD6432);
  case X86ISD::ATOMSUB64_DAG:
    return SelectAtomic64(Node, X86::ATOMSUB6432);
  case X86ISD::ATOMNAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMNAND6432);
  case X86ISD::ATOMAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMAND6432);
  case X86ISD::ATOMSWAP64_DAG:
    return SelectAtomic64(Node, X86::ATOMSWAP6432);
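
  // The ATOM*6432 targets above are pseudo-instructions for 64-bit atomic
  // read-modify-write operations on 32-bit hosts; after selection they are
  // expanded into a CMPXCHG8B retry loop.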
  case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR: {
    SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    // For operations of the form (x << C1) op C2, check if we can use a
    // smaller encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
      break;
    // i8 is unshrinkable, i16 should be promoted to i32.
    if (NVT != MVT::i32 && NVT != MVT::i64)
      break;

    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
    ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!Cst || !ShlCst)
      break;

    int64_t Val = Cst->getSExtValue();
    uint64_t ShlVal = ShlCst->getZExtValue();

    // Make sure that we don't change the operation by removing bits.
    // This only matters for OR and XOR; AND is unaffected.
    if (Opcode != ISD::AND && ((Val >> ShlVal) << ShlVal) != Val)
      break;

    unsigned ShlOp, Op = 0;
    EVT CstVT = NVT;

    // Check the minimum bitwidth for the new constant.
    // TODO: AND32ri is the same as AND64ri32 with zext imm.
    // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
    // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
    if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
      CstVT = MVT::i8;
    else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
      CstVT = MVT::i32;

    // Bail if there is no smaller encoding.
    if (NVT == CstVT)
      break;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i32:
      assert(CstVT == MVT::i8);
      ShlOp = X86::SHL32ri;

      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = X86::AND32ri8; break;
      case ISD::OR:  Op = X86::OR32ri8;  break;
      case ISD::XOR: Op = X86::XOR32ri8; break;
      }
      break;
    case MVT::i64:
      assert(CstVT == MVT::i8 || CstVT == MVT::i32);
      ShlOp = X86::SHL64ri;

      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
      case ISD::OR:  Op = CstVT==MVT::i8? X86::OR64ri8  : X86::OR64ri32;  break;
      case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
      }
      break;
    }

    // Emit the smaller op and the shift.
    SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
    SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0), NewCst);
    return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
                                getI8Imm(ShlVal));
  }
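
  // Example of the shrinking above: "(x << 4) | 0x7f0" as written needs a
  // 32-bit immediate OR, but rewritten as "(x | 0x7f) << 4" the OR can use
  // the sign-extended 8-bit immediate form (OR32ri8), saving three bytes of
  // encoding. The bit-preservation check above rejects masks whose low bits
  // would be dropped by the shift, e.g. OR with 0x7f1.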
  case X86ISD::UMUL: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned LoReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  Opc = X86::MUL8r;  break;
    case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
    case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
    SDValue Ops[] = {N1, InFlag};
    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2);

    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
    return NULL;
  }
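
  // Note on the multiply selections here: hardware MUL/IMUL write fixed
  // registers (e.g. MUL32r sets EDX:EAX and EFLAGS) rather than ordinary
  // virtual-register results, so the selected machine nodes carry an explicit
  // value list (low, high, flags) and each SDNode result is wired to the
  // corresponding machine-node result with ReplaceUses.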
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative, so try folding a load of the other operand.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }
    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);

      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      SDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag);
      InFlag = SDValue(CNode, 0);
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);
      // Get the low part if needed. Don't use getCopyFromReg for aliasing
      // registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX down 8 bits.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                 CurDAG->getTargetConstant(8, MVT::i8)), 0);
      // Then truncate it down to i8.
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the high half of the result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    return NULL;
  }
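
  // x86 divide has the same fixed-register discipline as the widening
  // multiply above: the dividend lives in HiReg:LoReg (e.g. EDX:EAX), the
  // quotient comes back in LoReg, and the remainder in HiReg. The extra work
  // in the case below goes into setting up the high half of the dividend:
  // sign extension (CBW/CWD/CDQ/CQO) for signed division, zeroing for
  // unsigned.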
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg, ClrReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
      ClrOpcode  = 0;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrOpcode  = X86::MOV16r0; ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      ClrOpcode  = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      ClrOpcode  = X86::MOV64r0;
      SExtOpcode = X86::CQO;
      break;
    }
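
    // For a 32-bit signed division this emits "cdq" (sign-extend EAX into
    // EDX) before "idivl"; for unsigned division it instead zeroes EDX via
    // the MOV32r0 pseudo (later expanded to an xor), so EDX:EAX holds the
    // zero-extended dividend.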
    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
                                         MVT::Other, Ops,
                                         array_lengthof(Ops)), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0), 0);
        Chain = CurDAG->getEntryNode();
      }
      Chain  = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag), 0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode =
          SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }
    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    // Shift it down 8 bits.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);

      // If we also need AL (the quotient), get it by extracting a subreg from
      // Result. The fast register allocator does not like multiple CopyFromReg
      // nodes using aliasing registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX right by 8 bits instead of using AH.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                 CurDAG->getTargetConstant(8, MVT::i8)), 0);
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    return NULL;
  }
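
  // The X86ISD::CMP case below shrinks flag-setting tests: when the AND mask
  // fits in fewer bits than the operand, TEST is re-emitted on a subregister
  // (e.g. "testl $8, %eax" -> "testb $8, %al"), provided no user of EFLAGS
  // depends on SF/OF as computed at the original width.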
  case X86ISD::CMP: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
        HasNoSignedComparisonUses(Node))
      // Look past the truncate if CMP is the only use of it.
      N0 = N0.getOperand(0);
    if ((N0.getNode()->getOpcode() == ISD::AND ||
         (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
        N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;

      // For example, convert "testl %eax, $8" to "testb %al, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
          (!(C->getZExtValue() & 0x80) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // On x86-32, only the ABCD registers have 8-bit subregisters.
        if (!Subtarget->is64Bit()) {
          const TargetRegisterClass *TRC;
          switch (N0.getValueType().getSimpleVT().SimpleTy) {
          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC), 0);
        }

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, Subreg, Imm);
      }
      // For example, "testl %eax, $2048" to "testb %ah, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Put the value in an ABCD register.
        const TargetRegisterClass *TRC;
        switch (N0.getValueType().getSimpleVT().SimpleTy) {
        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
        default: llvm_unreachable("Unsupported TEST operand type!");
        }
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                             Reg.getValueType(), Reg, RC), 0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                        MVT::i8, Reg);

        // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only
        // target GR8_NOREX registers, so make sure the register class is
        // forced.
        return CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl, MVT::i32,
                                      Subreg, ShiftedImm);
      }
      // For example, "testl %eax, $32776" to "testw %ax, $32776".
      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
          N0.getValueType() != MVT::i16 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        return CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, Subreg, Imm);
      }

      // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
          N0.getValueType() == MVT::i64 &&
          (!(C->getZExtValue() & 0x80000000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        return CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, Subreg, Imm);
      }
    }
    break;
  }
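
  // The sign-bit guards above are what make the narrowing sound: for example
  // "testl $0x80, %eax" computes SF from bit 31 of the result (always zero
  // here), while the narrowed "testb $0x80, %al" computes SF from bit 7. The
  // two only agree when no user reads SF/OF, which is exactly what
  // HasNoSignedComparisonUses verifies.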
  case ISD::STORE: {
    // Change a chain of {load; incr or dec; store} of the same value into
    // a simple increment or decrement through memory of that value, if the
    // uses of the modified value and its address are suitable.
    // The DEC64m tablegen pattern is currently not able to match the case
    // where the EFLAGS on the original DEC are used. (This also applies to
    // {INC,DEC}X{64,32,16,8}.)
    // We'll need to improve tablegen to allow flags to be transferred from a
    // node in the pattern to the result node, probably with a new keyword.
    // For example, we have this:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //     [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //      (implicit EFLAGS)]>;
    // but maybe need something like this:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //     [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //      (transferrable EFLAGS)]>;
    StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
    SDValue StoredVal = StoreNode->getOperand(1);
    unsigned Opc = StoredVal->getOpcode();

    LoadSDNode *LoadNode = 0;
    SDValue InputChain;
    if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
                             LoadNode, InputChain))
      break;

    SDValue Base, Scale, Index, Disp, Segment;
    if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
                    Base, Scale, Index, Disp, Segment))
      break;

    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
    MemOp[0] = StoreNode->getMemOperand();
    MemOp[1] = LoadNode->getMemOperand();
    const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
    EVT LdVT = LoadNode->getMemoryVT();
    unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
    MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
                                                   Node->getDebugLoc(),
                                                   MVT::i32, MVT::Other, Ops,
                                                   array_lengthof(Ops));
    Result->setMemRefs(MemOp, MemOp + 2);

    ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
    ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));

    return Result;
  }
  }
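
  // The fused node produces (EFLAGS, chain): result 1 replaces the store's
  // chain output and result 0 replaces the INC/DEC's flag output, so a
  // sequence like "mov (%rdi), %eax; inc %eax; mov %eax, (%rdi)" collapses
  // into a single "incl (%rdi)".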
  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == NULL || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << '\n');

  return ResNode;
}
bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}
/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}