//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "x86-isel"
17 #include "X86InstrBuilder.h"
18 #include "X86ISelLowering.h"
19 #include "X86MachineFunctionInfo.h"
20 #include "X86RegisterInfo.h"
21 #include "X86Subtarget.h"
22 #include "X86TargetMachine.h"
23 #include "llvm/GlobalValue.h"
24 #include "llvm/Instructions.h"
25 #include "llvm/Intrinsics.h"
26 #include "llvm/Support/CFG.h"
27 #include "llvm/Type.h"
28 #include "llvm/CodeGen/MachineConstantPool.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/CodeGen/SelectionDAGISel.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/MathExtras.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/SmallPtrSet.h"
41 #include "llvm/ADT/Statistic.h"
44 STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
46 //===----------------------------------------------------------------------===//
47 // Pattern Matcher Implementation
48 //===----------------------------------------------------------------------===//
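// A note on what the matcher below builds: an x86 memory reference has the
// general form segment:[base + scale*index + disp], where scale is 1, 2, 4,
// or 8 and disp is a sign-extended 32-bit immediate. X86ISelAddressMode
// accumulates these pieces while MatchAddress walks the DAG.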
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum { RegBase, FrameIndexBase } BaseType;

    struct {            // This is really a union, discriminated by BaseType!
      SDValue Reg;
      int FrameIndex;
    } Base;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    GlobalValue *GV;
    Constant *CP;
    BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;             // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
        SymbolFlags(X86II::MO_NO_FLAG) {
    }
    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
    }

    bool hasBaseOrIndexReg() const {
      return IndexReg.getNode() != 0 || Base.Reg.getNode() != 0;
    }

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base.Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base.Reg = Reg;
    }

    void dump() {
107 dbgs() << "X86ISelAddressMode " << this << '\n';
108 dbgs() << "Base.Reg ";
109 if (Base.Reg.getNode() != 0)
110 Base.Reg.getNode()->dump();
113 dbgs() << " Base.FrameIndex " << Base.FrameIndex << '\n'
114 << " Scale" << Scale << '\n'
116 if (IndexReg.getNode() != 0)
117 IndexReg.getNode()->dump();
120 dbgs() << " Disp " << Disp << '\n'
137 dbgs() << " JT" << JT << " Align" << Align << '\n';
143 //===--------------------------------------------------------------------===//
144 /// ISel - X86 specific code to select X86 machine instructions for
145 /// SelectionDAG operations.
147 class X86DAGToDAGISel : public SelectionDAGISel {
148 /// X86Lowering - This object fully describes how to lower LLVM code to an
149 /// X86-specific SelectionDAG.
150 X86TargetLowering &X86Lowering;
152 /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
153 /// make the right decision when generating code for different targets.
154 const X86Subtarget *Subtarget;
    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        X86Lowering(*tm.getTargetLowering()),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}
167 virtual const char *getPassName() const {
168 return "X86 DAG->DAG Instruction Selection";
171 virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);
173 virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;
175 virtual void PreprocessISelDAG();
177 // Include the pieces autogenerated from the target description.
178 #include "X86GenDAGISel.inc"
181 SDNode *Select(SDNode *N);
182 SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
183 SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT);
185 bool MatchSegmentBaseAddress(SDValue N, X86ISelAddressMode &AM);
186 bool MatchLoad(SDValue N, X86ISelAddressMode &AM);
187 bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
188 bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
189 bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
191 bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
192 bool SelectAddr(SDNode *Op, SDValue N, SDValue &Base,
193 SDValue &Scale, SDValue &Index, SDValue &Disp,
195 bool SelectLEAAddr(SDNode *Op, SDValue N, SDValue &Base,
196 SDValue &Scale, SDValue &Index, SDValue &Disp);
197 bool SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
198 SDValue &Scale, SDValue &Index, SDValue &Disp);
199 bool SelectScalarSSELoad(SDNode *Root, SDValue N,
200 SDValue &Base, SDValue &Scale,
201 SDValue &Index, SDValue &Disp,
203 SDValue &NodeWithChain);
205 bool TryFoldLoad(SDNode *P, SDValue N,
206 SDValue &Base, SDValue &Scale,
207 SDValue &Index, SDValue &Disp,
210 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
211 /// inline asm expressions.
212 virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
214 std::vector<SDValue> &OutOps);
216 void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
218 inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
219 SDValue &Scale, SDValue &Index,
220 SDValue &Disp, SDValue &Segment) {
221 Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
222 CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
224 Scale = getI8Imm(AM.Scale);
226 // These are 32-bit even in 64-bit mode since RIP relative offset
229 Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp,
232 Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
233 AM.Align, AM.Disp, AM.SymbolFlags);
235 Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
236 else if (AM.JT != -1)
237 Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
238 else if (AM.BlockAddr)
239 Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32,
240 true, AM.SymbolFlags);
242 Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);
244 if (AM.Segment.getNode())
245 Segment = AM.Segment;
247 Segment = CurDAG->getRegister(0, MVT::i32);
    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI16Imm - Return a target constant with the specified value, of type
    /// i16.
    inline SDValue getI16Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i16);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }
268 /// getGlobalBaseReg - Return an SDNode that returns the value of
269 /// the global base register. Output instructions required to
270 /// initialize the global base register, if necessary.
272 SDNode *getGlobalBaseReg();
274 /// getTargetMachine - Return a reference to the TargetMachine, casted
275 /// to the target-specific type.
276 const X86TargetMachine &getTargetMachine() {
277 return static_cast<const X86TargetMachine &>(TM);
280 /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
281 /// to the target-specific type.
282 const X86InstrInfo *getInstrInfo() {
283 return getTargetMachine().getInstrInfo();
bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
291 if (OptLevel == CodeGenOpt::None) return false;
  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
314 SDValue Op1 = U->getOperand(1);
      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size, e.g.:
      //   movl 4(%esp), %eax
      //   addl $4, %eax
      // vs.
      //   movl $4, %eax
      //   addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;
      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      //   movl    %gs:0, %eax
      //   leal    i@NTPOFF(%eax), %eax
      // instead of
      //   movl    $i@NTPOFF, %eax
      //   addl    %gs:0, %eax
      // If the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
352 /// MoveBelowCallSeqStart - Replace CALLSEQ_START operand with load's chain
353 /// operand and move load below the call's chain operand.
354 static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
355 SDValue Call, SDValue CallSeqStart) {
356 SmallVector<SDValue, 8> Ops;
357 SDValue Chain = CallSeqStart.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected CallSeqStart chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));

    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
374 for (unsigned i = 1, e = CallSeqStart.getNumOperands(); i != e; ++i)
375 Ops.push_back(CallSeqStart.getOperand(i));
376 CurDAG->UpdateNodeOperands(CallSeqStart, &Ops[0], Ops.size());
377 CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
381 for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
382 Ops.push_back(Call.getOperand(i));
383 CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size());
386 /// isCalleeLoad - Return true if call address is a load and it can be
387 /// moved below CALLSEQ_START and the chains leading up to the call.
388 /// Return the CALLSEQ_START by reference as a second output.
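/// For example, an indirect call whose callee operand is a load from the
/// stack qualifies: hoisting the load past the CALLSEQ_START lets selection
/// fold it directly into the call's memory operand instead of materializing
/// the target in a register first.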
389 static bool isCalleeLoad(SDValue Callee, SDValue &Chain) {
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;
399 // Now let's find the callseq_start.
400 while (Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}
415 void X86DAGToDAGISel::PreprocessISelDAG() {
416 // OptForSize is used in pattern predicates that isel is matching.
417 OptForSize = MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize);
419 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
420 E = CurDAG->allnodes_end(); I != E; ) {
421 SDNode *N = I++; // Preincrement iterator to avoid invalidation issues.
423 if (OptLevel != CodeGenOpt::None && N->getOpcode() == X86ISD::CALL) {
424 /// Also try moving call address load from outside callseq_start to just
425 /// before the call to allow it to be folded.
443 SDValue Chain = N->getOperand(0);
444 SDValue Load = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain))
        continue;
      MoveBelowCallSeqStart(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
452 // Lower fpround and fpextend nodes that target the FP stack to be store and
453 // load to the stack. This is a gross hack. We would like to simply mark
454 // these as being illegal, but when we do that, legalize produces these when
455 // it expands calls, then expands these in the same legalize pass. We would
456 // like dag combine to be able to hack on these between the call expansion
457 // and the node legalization. As such this pass basically does "really
458 // late" legalization of these inline with the X86 isel pass.
459 // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;
463 // If the source and destination are SSE registers, then this is a legal
464 // conversion that should not be lowered.
465 EVT SrcVT = N->getOperand(0).getValueType();
466 EVT DstVT = N->getValueType(0);
467 bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
468 bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;
    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }
481 // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
482 // FPStack has extload and truncstore. SSE can fold direct loads into other
483 // operations. Based on this, decide what we want to do.
    EVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;
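    // The conversion itself is then done through memory: the value is stored
    // to a stack slot of MemVT and reloaded as DstVT. For FP_ROUND this is a
    // truncating store followed by a plain load; for FP_EXTEND it is a plain
    // store followed by an extending load.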
490 SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
491 DebugLoc dl = N->getDebugLoc();
493 // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, NULL, 0, MemVT,
                                          false, false, 0);
498 SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
499 NULL, 0, MemVT, false, false, 0);
501 // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
502 // extload we created. This will cause general havok on the dag because
503 // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
516 /// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
517 /// the main function.
518 void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
519 MachineFrameInfo *MFI) {
520 const TargetInstrInfo *TII = TM.getInstrInfo();
521 if (Subtarget->isTargetCygMing())
522 BuildMI(BB, DebugLoc::getUnknownLoc(),
523 TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
526 void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
527 // If this is main, emit special code for main.
528 MachineBasicBlock *BB = MF.begin();
529 if (Fn.hasExternalLinkage() && Fn.getName() == "main")
530 EmitSpecialCodeForMain(BB, MF.getFrameInfo());
534 bool X86DAGToDAGISel::MatchSegmentBaseAddress(SDValue N,
535 X86ISelAddressMode &AM) {
536 assert(N.getOpcode() == X86ISD::SegmentBaseAddress);
537 SDValue Segment = N.getOperand(0);
539 if (AM.Segment.getNode() == 0) {
540 AM.Segment = Segment;
547 bool X86DAGToDAGISel::MatchLoad(SDValue N, X86ISelAddressMode &AM) {
548 // This optimization is valid because the GNU TLS model defines that
549 // gs:0 (or fs:0 on X86-64) contains its own address.
550 // For more information see http://people.redhat.com/drepper/tls.pdf
552 SDValue Address = N.getOperand(1);
553 if (Address.getOpcode() == X86ISD::SegmentBaseAddress &&
      !MatchSegmentBaseAddress(Address, AM))
    return false;

  return true;
}
560 /// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
561 /// into an addressing mode. These wrap things that will resolve down into a
/// symbol reference.  If no match is possible, this returns true, otherwise
/// it returns false.
564 bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
565 // If the addressing mode already has a symbol as the displacement, we can
566 // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;
570 SDValue N0 = N.getOperand(0);
571 CodeModel::Model M = TM.getCodeModel();
573 // Handle X86-64 rip-relative addresses. We check this before checking direct
574 // folding because RIP is preferable to non-RIP accesses.
575 if (Subtarget->is64Bit() &&
576 // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
577 // they cannot be folded into immediate fields.
578 // FIXME: This can be improved for kernel and other models?
579 (M == CodeModel::Small || M == CodeModel::Kernel) &&
      // Base and index reg must be 0 in order to use %rip as base and lowering
      // must allow RIP.
582 !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {
583 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
584 int64_t Offset = AM.Disp + G->getOffset();
585 if (!X86::isOffsetSuitableForCodeModel(Offset, M)) return true;
      AM.GV = G->getGlobal();
      AM.Disp = Offset;
      AM.SymbolFlags = G->getTargetFlags();
589 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
590 int64_t Offset = AM.Disp + CP->getOffset();
591 if (!X86::isOffsetSuitableForCodeModel(Offset, M)) return true;
592 AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp = Offset;
      AM.SymbolFlags = CP->getTargetFlags();
596 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
597 AM.ES = S->getSymbol();
598 AM.SymbolFlags = S->getTargetFlags();
599 } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
600 AM.JT = J->getIndex();
601 AM.SymbolFlags = J->getTargetFlags();
603 AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
604 AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
607 if (N.getOpcode() == X86ISD::WrapperRIP)
608 AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
612 // Handle the case when globals fit in our immediate field: This is true for
613 // X86-32 always and X86-64 when in -static -mcmodel=small mode. In 64-bit
614 // mode, this results in a non-RIP-relative computation.
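  // For example, on x86-32 a reference like "movl var+4, %eax" encodes the
  // symbol (plus any constant offset) directly in the instruction's 32-bit
  // displacement field, so no base or index register is needed.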
615 if (!Subtarget->is64Bit() ||
616 ((M == CodeModel::Small || M == CodeModel::Kernel) &&
617 TM.getRelocationModel() == Reloc::Static)) {
618 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
619 AM.GV = G->getGlobal();
620 AM.Disp += G->getOffset();
621 AM.SymbolFlags = G->getTargetFlags();
622 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
623 AM.CP = CP->getConstVal();
624 AM.Align = CP->getAlignment();
625 AM.Disp += CP->getOffset();
626 AM.SymbolFlags = CP->getTargetFlags();
627 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
628 AM.ES = S->getSymbol();
629 AM.SymbolFlags = S->getTargetFlags();
630 } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
631 AM.JT = J->getIndex();
632 AM.SymbolFlags = J->getTargetFlags();
634 AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
635 AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
643 /// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done.  This just pattern matches for the
/// addressing mode.
646 bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;
650 // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
651 // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base.Reg.getNode() == 0) {
    AM.Base.Reg = AM.IndexReg;
    AM.Scale = 1;
  }
659 // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
660 // because it has a smaller encoding.
661 // TODO: Which other code models can use this?
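  // In 64-bit mode an absolute disp32 address needs a SIB byte (the
  // no-base, no-index encoding), while "foo(%rip)" does not, so the
  // RIP-relative form is one byte shorter per reference.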
662 if (TM.getCodeModel() == CodeModel::Small &&
663 Subtarget->is64Bit() &&
665 AM.BaseType == X86ISelAddressMode::RegBase &&
666 AM.Base.Reg.getNode() == 0 &&
667 AM.IndexReg.getNode() == 0 &&
668 AM.SymbolFlags == X86II::MO_NO_FLAG &&
669 AM.hasSymbolicDisplacement())
    AM.Base.Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}
bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  bool is64Bit = Subtarget->is64Bit();
  DebugLoc dl = N.getDebugLoc();
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);
687 CodeModel::Model M = TM.getCodeModel();
689 // If this is already a %rip relative address, we can only merge immediates
690 // into it. Instead of handling this in every case, we handle it here.
691 // RIP relative addressing: %rip + 32-bit displacement!
692 if (AM.isRIPRelative()) {
693 // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements.  It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;
698 if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N)) {
699 int64_t Val = AM.Disp + Cst->getSExtValue();
700 if (X86::isOffsetSuitableForCodeModel(Val, M,
701 AM.hasSymbolicDisplacement())) {
709 switch (N.getOpcode()) {
711 case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!is64Bit ||
        X86::isOffsetSuitableForCodeModel(AM.Disp + Val, M,
                                          AM.hasSymbolicDisplacement())) {
      AM.Disp += Val;
      return false;
    }
    break;
  }
  case X86ISD::SegmentBaseAddress:
    if (!MatchSegmentBaseAddress(N, AM))
      return false;
    break;

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoad(N, AM))
      return false;
    break;
738 case ISD::FrameIndex:
739 if (AM.BaseType == X86ISelAddressMode::RegBase
740 && AM.Base.Reg.getNode() == 0) {
741 AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;
  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
753 unsigned Val = CN->getZExtValue();
754 // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
755 // that the base operand remains free for further matching. If
756 // the base doesn't end up getting used, a post-processing step
757 // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
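      // For example, (shl x, 2) becomes index = x with scale 4 here, while
      // (shl x, 1) is first taken as (,x,2) and only folded to (x,x) later
      // if nothing else claims the base register.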
758 if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);
762 // Okay, we know that we have a scale by now. However, if the scaled
763 // value is an add of something and a constant, we can fold the
764 // constant into the disp field here.
765 if (ShVal.getNode()->getOpcode() == ISD::ADD &&
766 isa<ConstantSDNode>(ShVal.getNode()->getOperand(1))) {
767 AM.IndexReg = ShVal.getNode()->getOperand(0);
768 ConstantSDNode *AddVal =
769 cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
770 uint64_t Disp = AM.Disp + (AddVal->getSExtValue() << Val);
772 X86::isOffsetSuitableForCodeModel(Disp, M,
773 AM.hasSymbolicDisplacement()))
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
792 // X*[3,5,9] -> X+X*[2,4,8]
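    // For example, x*9 becomes base = x, index = x, scale = 8, i.e. an
    // address of the form (x,x,8) that a single LEA can compute.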
793 if (AM.BaseType == X86ISelAddressMode::RegBase &&
794 AM.Base.Reg.getNode() == 0 &&
795 AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
798 if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
799 CN->getZExtValue() == 9) {
800 AM.Scale = unsigned(CN->getZExtValue())-1;
802 SDValue MulVal = N.getNode()->getOperand(0);
805 // Okay, we know that we have a scale by now. However, if the scaled
806 // value is an add of something and a constant, we can fold the
807 // constant into the disp field here.
808 if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
809 isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
810 Reg = MulVal.getNode()->getOperand(0);
811 ConstantSDNode *AddVal =
812 cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
813 uint64_t Disp = AM.Disp + AddVal->getSExtValue() *
816 X86::isOffsetSuitableForCodeModel(Disp, M,
817 AM.hasSymbolicDisplacement()))
820 Reg = N.getNode()->getOperand(0);
822 Reg = N.getNode()->getOperand(0);
825 AM.IndexReg = AM.Base.Reg = Reg;
  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address and
    // the index field with the index field unused, use -B as the index.
    // This is a win if A has multiple parts that can be folded into
    // the address. Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction; however,
    // it costs an additional mov if the index register has other uses.
839 // Test if the LHS of the sub can be folded.
840 X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }
    int Cost = 0;
    SDValue RHS = N.getNode()->getOperand(1);
852 // If the RHS involves a register with multiple uses, this
853 // transformation incurs an extra mov, due to the neg instruction
854 // clobbering its operand.
855 if (!RHS.getNode()->hasOneUse() ||
856 RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
857 RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
858 RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
859 (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
862 // If the base is a register with multiple uses, this
863 // transformation may save a mov.
864 if ((AM.BaseType == X86ISelAddressMode::RegBase &&
865 AM.Base.Reg.getNode() &&
866 !AM.Base.Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
869 // If the folded LHS was interesting, this transformation saves
870 // address arithmetic.
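    // (Each term below contributes 1 when the folded LHS supplied that piece
    // of the address -- a symbol, a nonzero displacement, or a segment; two
    // or more pieces means real address arithmetic was saved.)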
871 if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
872 ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }
881 // Ok, the transformation is legal and appears profitable. Go for it.
882 SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;
887 // Insert the new nodes into the topological ordering.
888 if (Zero.getNode()->getNodeId() == -1 ||
889 Zero.getNode()->getNodeId() > N.getNode()->getNodeId()) {
890 CurDAG->RepositionNode(N.getNode(), Zero.getNode());
891 Zero.getNode()->setNodeId(N.getNode()->getNodeId());
893 if (Neg.getNode()->getNodeId() == -1 ||
894 Neg.getNode()->getNodeId() > N.getNode()->getNodeId()) {
895 CurDAG->RepositionNode(N.getNode(), Neg.getNode());
896 Neg.getNode()->setNodeId(N.getNode()->getNodeId());
  case ISD::ADD: {
    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(N.getNode()->getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;
    if (!MatchAddressRecursively(N.getNode()->getOperand(1), AM, Depth+1) &&
        !MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;
912 // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
915 if (AM.BaseType == X86ISelAddressMode::RegBase &&
916 !AM.Base.Reg.getNode() &&
917 !AM.IndexReg.getNode()) {
918 AM.Base.Reg = N.getNode()->getOperand(0);
      AM.IndexReg = N.getNode()->getOperand(1);
      AM.Scale = 1;
      return false;
    }
927 // Handle "X | C" as "X + C" iff X is known to have C bits clear.
928 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
929 X86ISelAddressMode Backup = AM;
930 uint64_t Offset = CN->getSExtValue();
931 // Start with the LHS as an addr mode.
932 if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
933 // Address could not have picked a GV address for the displacement.
935 // On x86-64, the resultant disp must fit in 32-bits.
937 X86::isOffsetSuitableForCodeModel(AM.Disp + Offset, M,
938 AM.hasSymbolicDisplacement())) &&
939 // Check to see if the LHS & C is zero.
          CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
        AM.Disp += Offset;
        return false;
      }
      AM = Backup;
949 // Perform some heroic transforms on an and of a constant-count shift
950 // with a constant to enable use of the scaled offset field.
952 SDValue Shift = N.getOperand(0);
953 if (Shift.getNumOperands() != 2) break;
955 // Scale must not be used already.
956 if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
958 SDValue X = Shift.getOperand(0);
959 ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
960 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
961 if (!C1 || !C2) break;
963 // Handle "(X >> (8-C1)) & C2" as "(X >> 8) & 0xff)" if safe. This
964 // allows us to convert the shift and and into an h-register extract and
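    // For example, "(x >> 5) & 0x7f8" (C1 = 5, C2 = 0xff << 3) becomes the
    // h-register value (x >> 8) & 0xff used as an index with scale 8.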
966 if (Shift.getOpcode() == ISD::SRL && Shift.hasOneUse()) {
967 unsigned ScaleLog = 8 - C1->getZExtValue();
968 if (ScaleLog > 0 && ScaleLog < 4 &&
969 C2->getZExtValue() == (UINT64_C(0xff) << ScaleLog)) {
970 SDValue Eight = CurDAG->getConstant(8, MVT::i8);
971 SDValue Mask = CurDAG->getConstant(0xff, N.getValueType());
972 SDValue Srl = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
974 SDValue And = CurDAG->getNode(ISD::AND, dl, N.getValueType(),
976 SDValue ShlCount = CurDAG->getConstant(ScaleLog, MVT::i8);
977 SDValue Shl = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
980 // Insert the new nodes into the topological ordering.
981 if (Eight.getNode()->getNodeId() == -1 ||
982 Eight.getNode()->getNodeId() > X.getNode()->getNodeId()) {
983 CurDAG->RepositionNode(X.getNode(), Eight.getNode());
984 Eight.getNode()->setNodeId(X.getNode()->getNodeId());
986 if (Mask.getNode()->getNodeId() == -1 ||
987 Mask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
988 CurDAG->RepositionNode(X.getNode(), Mask.getNode());
989 Mask.getNode()->setNodeId(X.getNode()->getNodeId());
991 if (Srl.getNode()->getNodeId() == -1 ||
992 Srl.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
993 CurDAG->RepositionNode(Shift.getNode(), Srl.getNode());
994 Srl.getNode()->setNodeId(Shift.getNode()->getNodeId());
996 if (And.getNode()->getNodeId() == -1 ||
997 And.getNode()->getNodeId() > N.getNode()->getNodeId()) {
998 CurDAG->RepositionNode(N.getNode(), And.getNode());
999 And.getNode()->setNodeId(N.getNode()->getNodeId());
1001 if (ShlCount.getNode()->getNodeId() == -1 ||
1002 ShlCount.getNode()->getNodeId() > X.getNode()->getNodeId()) {
1003 CurDAG->RepositionNode(X.getNode(), ShlCount.getNode());
1004 ShlCount.getNode()->setNodeId(N.getNode()->getNodeId());
1006 if (Shl.getNode()->getNodeId() == -1 ||
1007 Shl.getNode()->getNodeId() > N.getNode()->getNodeId()) {
1008 CurDAG->RepositionNode(N.getNode(), Shl.getNode());
1009 Shl.getNode()->setNodeId(N.getNode()->getNodeId());
1011 CurDAG->ReplaceAllUsesWith(N, Shl);
        AM.Scale = (1 << ScaleLog);
        AM.IndexReg = And;
        return false;
1018 // Handle "(X << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
1019 // allows us to fold the shift into this addressing mode.
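    // For example, "(x << 2) & 0x3fc" is rewritten as "(x & 0xff) << 2", and
    // the left shift is then absorbed into the address as scale 4.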
1020 if (Shift.getOpcode() != ISD::SHL) break;
1022 // Not likely to be profitable if either the AND or SHIFT node has more
1023 // than one use (unless all uses are for address computation). Besides,
    // the isel mechanism requires their node IDs to be reused.
1025 if (!N.hasOneUse() || !Shift.hasOneUse())
1028 // Verify that the shift amount is something we can fold.
1029 unsigned ShiftCst = C1->getZExtValue();
1030 if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
1033 // Get the new AND mask, this folds to a constant.
1034 SDValue NewANDMask = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
1035 SDValue(C2, 0), SDValue(C1, 0));
1036 SDValue NewAND = CurDAG->getNode(ISD::AND, dl, N.getValueType(), X,
1038 SDValue NewSHIFT = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
1039 NewAND, SDValue(C1, 0));
1041 // Insert the new nodes into the topological ordering.
1042 if (C1->getNodeId() > X.getNode()->getNodeId()) {
1043 CurDAG->RepositionNode(X.getNode(), C1);
1044 C1->setNodeId(X.getNode()->getNodeId());
1046 if (NewANDMask.getNode()->getNodeId() == -1 ||
1047 NewANDMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
1048 CurDAG->RepositionNode(X.getNode(), NewANDMask.getNode());
1049 NewANDMask.getNode()->setNodeId(X.getNode()->getNodeId());
1051 if (NewAND.getNode()->getNodeId() == -1 ||
1052 NewAND.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
1053 CurDAG->RepositionNode(Shift.getNode(), NewAND.getNode());
1054 NewAND.getNode()->setNodeId(Shift.getNode()->getNodeId());
1056 if (NewSHIFT.getNode()->getNodeId() == -1 ||
1057 NewSHIFT.getNode()->getNodeId() > N.getNode()->getNodeId()) {
1058 CurDAG->RepositionNode(N.getNode(), NewSHIFT.getNode());
1059 NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
1062 CurDAG->ReplaceAllUsesWith(N, NewSHIFT);
1064 AM.Scale = 1 << ShiftCst;
    AM.IndexReg = NewAND;
    return false;
  }
1070 return MatchAddressBase(N, AM);
1073 /// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
1074 /// specified addressing mode without any further recursion.
1075 bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
1076 // Is the base register already occupied?
1077 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
1078 // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base.Reg = N;
  return false;
}
/// SelectAddr - Returns true if it is able to pattern match an addressing mode.
1096 /// It returns the operands which make up the maximal addressing mode it can
1097 /// match by reference.
1098 bool X86DAGToDAGISel::SelectAddr(SDNode *Op, SDValue N, SDValue &Base,
1099 SDValue &Scale, SDValue &Index,
1100 SDValue &Disp, SDValue &Segment) {
1101 X86ISelAddressMode AM;
  if (MatchAddress(N, AM))
    return false;
1105 EVT VT = N.getValueType();
1106 if (AM.BaseType == X86ISelAddressMode::RegBase) {
1107 if (!AM.Base.Reg.getNode())
1108 AM.Base.Reg = CurDAG->getRegister(0, VT);
1111 if (!AM.IndexReg.getNode())
1112 AM.IndexReg = CurDAG->getRegister(0, VT);
  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
1118 /// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
1119 /// match a load whose top elements are either undef or zeros. The load flavor
1120 /// is derived from the type of N, which is either v4f32 or v2f64.
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   output.
1125 bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
1126 SDValue N, SDValue &Base,
1127 SDValue &Scale, SDValue &Index,
1128 SDValue &Disp, SDValue &Segment,
1129 SDValue &PatternNodeWithChain) {
1130 if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
1131 PatternNodeWithChain = N.getOperand(0);
1132 if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
1133 PatternNodeWithChain.hasOneUse() &&
1134 IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
1135 IsLegalToFold(N.getOperand(0), N.getNode(), Root)) {
1136 LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
1137 if (!SelectAddr(Root, LD->getBasePtr(), Base, Scale, Index, Disp,Segment))
1143 // Also handle the case where we explicitly require zeros in the top
1144 // elements. This is a vector shuffle from the zero vector.
1145 if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
1146 // Check to see if the top elements are all zeros (or bitcast of zeros).
1147 N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
1148 N.getOperand(0).getNode()->hasOneUse() &&
1149 ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
1150 N.getOperand(0).getOperand(0).hasOneUse() &&
1151 IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
1152 IsLegalToFold(N.getOperand(0), N.getNode(), Root)) {
1153 // Okay, this is a zero extending load. Fold it.
1154 LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
1155 if (!SelectAddr(Root, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
1157 PatternNodeWithChain = SDValue(LD, 0);
/// SelectLEAAddr - This calls SelectAddr and determines if the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
1166 bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
1167 SDValue &Base, SDValue &Scale,
1168 SDValue &Index, SDValue &Disp) {
1169 X86ISelAddressMode AM;
  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert(T == AM.Segment);
  AM.Segment = Copy;
1181 EVT VT = N.getValueType();
1182 unsigned Complexity = 0;
1183 if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base.Reg.getNode())
      Complexity = 1;
    else
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);
  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or
  // with a simple shift.
  if (Complexity == 1 && AM.Scale > 1)
    Complexity++;
1201 // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
1203 // optimal (especially for code size consideration). LEA is nice because of
1204 // its three-address nature. Tweak the cost function again when we can run
1205 // convertToThreeAddress() at register allocation time.
1206 if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP-relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }
  if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;
1218 // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  SDValue Segment;
  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
1227 /// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
1228 bool X86DAGToDAGISel::SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
1229 SDValue &Scale, SDValue &Index,
1231 assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
1232 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
1234 X86ISelAddressMode AM;
1235 AM.GV = GA->getGlobal();
1236 AM.Disp += GA->getOffset();
1237 AM.Base.Reg = CurDAG->getRegister(0, N.getValueType());
1238 AM.SymbolFlags = GA->getTargetFlags();
1240 if (N.getValueType() == MVT::i32) {
1242 AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
1244 AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  SDValue Segment;
  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
1253 bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
1254 SDValue &Base, SDValue &Scale,
1255 SDValue &Index, SDValue &Disp,
1257 if (!ISD::isNON_EXTLoad(N.getNode()) ||
1258 !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P))
    return false;
1262 return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp, Segment);
1265 /// getGlobalBaseReg - Return an SDNode that returns the value of
1266 /// the global base register. Output instructions required to
1267 /// initialize the global base register, if necessary.
1269 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
1270 unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
1271 return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
1274 static SDNode *FindCallStartFromCall(SDNode *Node) {
1275 if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
1276 assert(Node->getOperand(0).getValueType() == MVT::Other &&
1277 "Node doesn't have a token chain argument!");
1278 return FindCallStartFromCall(Node->getOperand(0).getNode());
1281 SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
1282 SDValue Chain = Node->getOperand(0);
1283 SDValue In1 = Node->getOperand(1);
1284 SDValue In2L = Node->getOperand(2);
1285 SDValue In2H = Node->getOperand(3);
1286 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(In1.getNode(), In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
1289 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1290 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
1291 const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
1292 SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
1293 MVT::i32, MVT::i32, MVT::Other, Ops,
1294 array_lengthof(Ops));
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
}
1299 SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  // Optimize common patterns for __sync_add_and_fetch and
  // __sync_sub_and_fetch where the result is not used. This allows us
  // to use the "lock" version of the add, sub, inc, and dec instructions.
  // FIXME: Do not use special instructions but instead add the "lock"
  // prefix to the target node somehow. The extra information will then be
  // transferred to the machine instruction where it denotes the prefix.
1309 SDValue Chain = Node->getOperand(0);
1310 SDValue Ptr = Node->getOperand(1);
1311 SDValue Val = Node->getOperand(2);
1312 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Ptr.getNode(), Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;
1316 bool isInc = false, isDec = false, isSub = false, isCN = false;
1317 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN) {
    isCN = true;
    int64_t CNVal = CN->getSExtValue();
    if (CNVal == 1)
      isInc = true;
    else if (CNVal == -1)
      isDec = true;
    else if (CNVal >= 0)
      Val = CurDAG->getTargetConstant(CNVal, NVT);
    else {
      isSub = true;
      Val = CurDAG->getTargetConstant(-CNVal, NVT);
    }
1331 } else if (Val.hasOneUse() &&
1332 Val.getOpcode() == ISD::SUB &&
1333 X86::isZeroNode(Val.getOperand(0))) {
    isSub = true;
    Val = Val.getOperand(1);
  }

  unsigned Opc = 0;
1339 switch (NVT.getSimpleVT().SimpleTy) {
1343 Opc = X86::LOCK_INC8m;
1345 Opc = X86::LOCK_DEC8m;
1348 Opc = X86::LOCK_SUB8mi;
1350 Opc = X86::LOCK_SUB8mr;
1353 Opc = X86::LOCK_ADD8mi;
1355 Opc = X86::LOCK_ADD8mr;
1360 Opc = X86::LOCK_INC16m;
1362 Opc = X86::LOCK_DEC16m;
1365 if (Predicate_immSext8(Val.getNode()))
1366 Opc = X86::LOCK_SUB16mi8;
1368 Opc = X86::LOCK_SUB16mi;
1370 Opc = X86::LOCK_SUB16mr;
1373 if (Predicate_immSext8(Val.getNode()))
1374 Opc = X86::LOCK_ADD16mi8;
1376 Opc = X86::LOCK_ADD16mi;
1378 Opc = X86::LOCK_ADD16mr;
1383 Opc = X86::LOCK_INC32m;
1385 Opc = X86::LOCK_DEC32m;
1388 if (Predicate_immSext8(Val.getNode()))
1389 Opc = X86::LOCK_SUB32mi8;
1391 Opc = X86::LOCK_SUB32mi;
1393 Opc = X86::LOCK_SUB32mr;
1396 if (Predicate_immSext8(Val.getNode()))
1397 Opc = X86::LOCK_ADD32mi8;
1399 Opc = X86::LOCK_ADD32mi;
1401 Opc = X86::LOCK_ADD32mr;
1406 Opc = X86::LOCK_INC64m;
1408 Opc = X86::LOCK_DEC64m;
1410 Opc = X86::LOCK_SUB64mr;
1412 if (Predicate_immSext8(Val.getNode()))
1413 Opc = X86::LOCK_SUB64mi8;
1414 else if (Predicate_i64immSExt32(Val.getNode()))
1415 Opc = X86::LOCK_SUB64mi32;
1418 Opc = X86::LOCK_ADD64mr;
1420 if (Predicate_immSext8(Val.getNode()))
1421 Opc = X86::LOCK_ADD64mi8;
1422 else if (Predicate_i64immSExt32(Val.getNode()))
1423 Opc = X86::LOCK_ADD64mi32;
1429 DebugLoc dl = Node->getDebugLoc();
1430 SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
1432 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1433 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
1434 if (isInc || isDec) {
1435 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
1436 SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6), 0);
1437 cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
1438 SDValue RetVals[] = { Undef, Ret };
1439 return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  } else {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
1442 SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
1443 cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
1444 SDValue RetVals[] = { Undef, Ret };
1445 return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
1449 /// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
1450 /// any uses which require the SF or OF bits to be accurate.
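/// For example, a compare whose only users are JE/JNE or unsigned conditions
/// (JA, JB, SETAE, CMOVB, ...) does not care about SF or OF, so it is safe
/// to narrow the comparison to a smaller immediate form.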
1451 static bool HasNoSignedComparisonUses(SDNode *N) {
1452 // Examine each user of the node.
1453 for (SDNode::use_iterator UI = N->use_begin(),
1454 UE = N->use_end(); UI != UE; ++UI) {
1455 // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      continue;
1458 // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
        X86::EFLAGS)
      continue;
1462 // Examine each user of the CopyToReg use.
1463 for (SDNode::use_iterator FlagUI = UI->use_begin(),
1464 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
1465 // Only examine the Flag result.
1466 if (FlagUI.getUse().getResNo() != 1) continue;
1467 // Anything unusual: assume conservatively.
1468 if (!FlagUI->isMachineOpcode()) return false;
1469 // Examine the opcode of the user.
1470 switch (FlagUI->getMachineOpcode()) {
1471 // These comparisons don't treat the most significant bit specially.
1472 case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
1473 case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
1474 case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
1475 case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
1476 case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
1477 case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
1478 case X86::CMOVA16rr: case X86::CMOVA16rm:
1479 case X86::CMOVA32rr: case X86::CMOVA32rm:
1480 case X86::CMOVA64rr: case X86::CMOVA64rm:
1481 case X86::CMOVAE16rr: case X86::CMOVAE16rm:
1482 case X86::CMOVAE32rr: case X86::CMOVAE32rm:
1483 case X86::CMOVAE64rr: case X86::CMOVAE64rm:
1484 case X86::CMOVB16rr: case X86::CMOVB16rm:
1485 case X86::CMOVB32rr: case X86::CMOVB32rm:
1486 case X86::CMOVB64rr: case X86::CMOVB64rm:
1487 case X86::CMOVBE16rr: case X86::CMOVBE16rm:
1488 case X86::CMOVBE32rr: case X86::CMOVBE32rm:
1489 case X86::CMOVBE64rr: case X86::CMOVBE64rm:
1490 case X86::CMOVE16rr: case X86::CMOVE16rm:
1491 case X86::CMOVE32rr: case X86::CMOVE32rm:
1492 case X86::CMOVE64rr: case X86::CMOVE64rm:
1493 case X86::CMOVNE16rr: case X86::CMOVNE16rm:
1494 case X86::CMOVNE32rr: case X86::CMOVNE32rm:
1495 case X86::CMOVNE64rr: case X86::CMOVNE64rm:
1496 case X86::CMOVNP16rr: case X86::CMOVNP16rm:
1497 case X86::CMOVNP32rr: case X86::CMOVNP32rm:
1498 case X86::CMOVNP64rr: case X86::CMOVNP64rm:
1499 case X86::CMOVP16rr: case X86::CMOVP16rm:
1500 case X86::CMOVP32rr: case X86::CMOVP32rm:
1501 case X86::CMOVP64rr: case X86::CMOVP64rm:
1503 // Anything else: assume conservatively.
1504 default: return false;
1511 SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
1512 EVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
1515 DebugLoc dl = Node->getDebugLoc();
1517 DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');
1519 if (Node->isMachineOpcode()) {
1520 DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
1521 return NULL; // Already selected.
  }

  switch (Opcode) {
  default: break;
  case X86ISD::GlobalBaseReg:
1527 return getGlobalBaseReg();
1529 case X86ISD::ATOMOR64_DAG:
1530 return SelectAtomic64(Node, X86::ATOMOR6432);
1531 case X86ISD::ATOMXOR64_DAG:
1532 return SelectAtomic64(Node, X86::ATOMXOR6432);
1533 case X86ISD::ATOMADD64_DAG:
1534 return SelectAtomic64(Node, X86::ATOMADD6432);
1535 case X86ISD::ATOMSUB64_DAG:
1536 return SelectAtomic64(Node, X86::ATOMSUB6432);
1537 case X86ISD::ATOMNAND64_DAG:
1538 return SelectAtomic64(Node, X86::ATOMNAND6432);
1539 case X86ISD::ATOMAND64_DAG:
1540 return SelectAtomic64(Node, X86::ATOMAND6432);
1541 case X86ISD::ATOMSWAP64_DAG:
1542 return SelectAtomic64(Node, X86::ATOMSWAP6432);
1544 case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
1551 case ISD::SMUL_LOHI:
1552 case ISD::UMUL_LOHI: {
1553 SDValue N0 = Node->getOperand(0);
1554 SDValue N1 = Node->getOperand(1);
1556 bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
1559 default: llvm_unreachable("Unsupported VT!");
1560 case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
1561 case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
1562 case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
1563 case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
1567 default: llvm_unreachable("Unsupported VT!");
1568 case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
1569 case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
1570 case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
1571 case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
1575 unsigned LoReg, HiReg;
1576 switch (NVT.getSimpleVT().SimpleTy) {
1577 default: llvm_unreachable("Unsupported VT!");
1578 case MVT::i8: LoReg = X86::AL; HiReg = X86::AH; break;
1579 case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; break;
1580 case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
1581 case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
1584 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
1585 bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }
1593 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
1594 N0, SDValue()).getValue(1);
1597 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
1600 CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
1601 array_lengthof(Ops));
1602 InFlag = SDValue(CNode, 1);
1603 // Update the chain.
1604 ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
1607 SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
1610 // Copy the low half of the result, if it is needed.
1611 if (!SDValue(Node, 0).use_empty()) {
1612 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
1613 LoReg, NVT, InFlag);
1614 InFlag = Result.getValue(2);
1615 ReplaceUses(SDValue(Node, 0), Result);
1616 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
1618 // Copy the high half of the result, if it is needed.
1619 if (!SDValue(Node, 1).use_empty()) {
1621 if (HiReg == X86::AH && Subtarget->is64Bit()) {
1622 // Prevent use of AH in a REX instruction by referencing AX instead.
1623 // Shift it down 8 bits.
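        // (AH, BH, CH and DH cannot be encoded in an instruction that carries
        // a REX prefix, so on x86-64 we read AX and shift instead.)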
1624 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
1625 X86::AX, MVT::i16, InFlag);
1626 InFlag = Result.getValue(2);
1627 Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
1629 CurDAG->getTargetConstant(8, MVT::i8)), 0);
1630 // Then truncate it down to i8.
1631 Result = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
1634 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
1635 HiReg, NVT, InFlag);
1636 InFlag = Result.getValue(2);
1638 ReplaceUses(SDValue(Node, 1), Result);
1639 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
1647 SDValue N0 = Node->getOperand(0);
1648 SDValue N1 = Node->getOperand(1);
1650 bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
1653 default: llvm_unreachable("Unsupported VT!");
1654 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
1655 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
1656 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
1657 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
1661 default: llvm_unreachable("Unsupported VT!");
1662 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
1663 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
1664 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
1665 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
1669 unsigned LoReg, HiReg, ClrReg;
1670 unsigned ClrOpcode, SExtOpcode;
1671 switch (NVT.getSimpleVT().SimpleTy) {
1672 default: llvm_unreachable("Unsupported VT!");
1674 LoReg = X86::AL; ClrReg = HiReg = X86::AH;
1676 SExtOpcode = X86::CBW;
1679 LoReg = X86::AX; HiReg = X86::DX;
1680 ClrOpcode = X86::MOV16r0; ClrReg = X86::DX;
1681 SExtOpcode = X86::CWD;
1684 LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
1685 ClrOpcode = X86::MOV32r0;
1686 SExtOpcode = X86::CDQ;
1689 LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
1690 ClrOpcode = X86::MOV64r0;
1691 SExtOpcode = X86::CQO;
1695 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
1696 bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
1697 bool signBitIsZero = CurDAG->SignBitIsZero(N0);
1700 if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
1701 // Special case for div8, just use a move with zero extension to AX to
1702 // clear the upper 8 bits (AH).
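      // i.e. zero-extend the 8-bit dividend into AX so the byte divide sees
      // a zero AH without needing a separate clearing instruction.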
1703 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
1704 if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
1705 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
1707 SDValue(CurDAG->getMachineNode(X86::MOVZX16rm8, dl, MVT::i16,
1709 array_lengthof(Ops)), 0);
1710 Chain = Move.getValue(1);
1711 ReplaceUses(N0.getValue(1), Chain);
1714 SDValue(CurDAG->getMachineNode(X86::MOVZX16rr8, dl, MVT::i16, N0),0);
1715 Chain = CurDAG->getEntryNode();
1717 Chain = CurDAG->getCopyToReg(Chain, dl, X86::AX, Move, SDValue());
1718 InFlag = Chain.getValue(1);
1721 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
1722 LoReg, N0, SDValue()).getValue(1);
1723 if (isSigned && !signBitIsZero) {
1724 // Sign extend the low part into the high part.
1726 SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Flag, InFlag),0);
1728 // Zero out the high part, effectively zero extending the input.
1730 SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
1731 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
1732 ClrNode, InFlag).getValue(1);
1737 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
1740 CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
1741 array_lengthof(Ops));
1742 InFlag = SDValue(CNode, 1);
1743 // Update the chain.
1744 ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
1747 SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
1750 // Copy the division (low) result, if it is needed.
1751 if (!SDValue(Node, 0).use_empty()) {
1752 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
1753 LoReg, NVT, InFlag);
1754 InFlag = Result.getValue(2);
1755 ReplaceUses(SDValue(Node, 0), Result);
1756 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
1758 // Copy the remainder (high) result, if it is needed.
1759 if (!SDValue(Node, 1).use_empty()) {
1761 if (HiReg == X86::AH && Subtarget->is64Bit()) {
1762 // Prevent use of AH in a REX instruction by referencing AX instead.
1763 // Shift it down 8 bits.
1764 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
1765 X86::AX, MVT::i16, InFlag);
1766 InFlag = Result.getValue(2);
1767 Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
1769 CurDAG->getTargetConstant(8, MVT::i8)),
1771 // Then truncate it down to i8.
1772 Result = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
1775 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
1776 HiReg, NVT, InFlag);
1777 InFlag = Result.getValue(2);
1779 ReplaceUses(SDValue(Node, 1), Result);
1780 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
  case X86ISD::CMP: {
    SDValue N0 = Node->getOperand(0);
1787 SDValue N1 = Node->getOperand(1);
1789 // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
1790 // use a smaller encoding.
1791 if (N0.getNode()->getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
1792 N0.getValueType() != MVT::i8 &&
1793 X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;
1797 // For example, convert "testl %eax, $8" to "testb %al, $8"
1798 if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
1799 (!(C->getZExtValue() & 0x80) ||
1800 HasNoSignedComparisonUses(Node))) {
1801 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
1802 SDValue Reg = N0.getNode()->getOperand(0);
1804 // On x86-32, only the ABCD registers have 8-bit subregisters.
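        // (so the value is first constrained to GR32_ABCD/GR16_ABCD before
        // its low byte is extracted; on x86-64 every GPR has an 8-bit subreg.)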
1805 if (!Subtarget->is64Bit()) {
1806 TargetRegisterClass *TRC = 0;
1807 switch (N0.getValueType().getSimpleVT().SimpleTy) {
1808 case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
1809 case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
1810 default: llvm_unreachable("Unsupported TEST operand type!");
1812 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
1813 Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
1814 Reg.getValueType(), Reg, RC), 0);
1817 // Extract the l-register.
1818 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
1822 return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, Subreg, Imm);
1825 // For example, "testl %eax, $2048" to "testb %ah, $8".
1826 if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
1827 (!(C->getZExtValue() & 0x8000) ||
1828 HasNoSignedComparisonUses(Node))) {
1829 // Shift the immediate right by 8 bits.
1830 SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
1832 SDValue Reg = N0.getNode()->getOperand(0);
1834 // Put the value in an ABCD register.
1835 TargetRegisterClass *TRC = 0;
1836 switch (N0.getValueType().getSimpleVT().SimpleTy) {
1837 case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
1838 case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
1839 case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
1840 default: llvm_unreachable("Unsupported TEST operand type!");
1842 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
1843 Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
1844 Reg.getValueType(), Reg, RC), 0);
1846 // Extract the h-register.
1847 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT_HI, dl,
1850 // Emit a testb. No special NOREX tricks are needed since there's
1851 // only one GPR operand!
1852 return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
1853 Subreg, ShiftedImm);
1856 // For example, "testl %eax, $32776" to "testw %ax, $32776".
1857 if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
1858 N0.getValueType() != MVT::i16 &&
1859 (!(C->getZExtValue() & 0x8000) ||
1860 HasNoSignedComparisonUses(Node))) {
1861 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
1862 SDValue Reg = N0.getNode()->getOperand(0);
1864 // Extract the 16-bit subregister.
1865 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_16BIT, dl,
1869 return CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, Subreg, Imm);
1872 // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
1873 if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
1874 N0.getValueType() == MVT::i64 &&
1875 (!(C->getZExtValue() & 0x80000000) ||
1876 HasNoSignedComparisonUses(Node))) {
1877 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
1878 SDValue Reg = N0.getNode()->getOperand(0);
1880 // Extract the 32-bit subregister.
1881 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_32BIT, dl,
1885 return CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, Subreg, Imm);
1892 SDNode *ResNode = SelectCode(Node);
1894 DEBUG(dbgs() << "=> ";
1895 if (ResNode == NULL || ResNode == Node)
1898 ResNode->dump(CurDAG);
1904 bool X86DAGToDAGISel::
1905 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
1906 std::vector<SDValue> &OutOps) {
1907 SDValue Op0, Op1, Op2, Op3, Op4;
1908 switch (ConstraintCode) {
1909 case 'o': // offsetable ??
1910 case 'v': // not offsetable ??
1911 default: return true;
  case 'm':   // memory
    if (!SelectAddr(Op.getNode(), Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }
1918 OutOps.push_back(Op0);
1919 OutOps.push_back(Op1);
1920 OutOps.push_back(Op2);
1921 OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}
1926 /// createX86ISelDag - This pass converts a legalized DAG into a
1927 /// X86-specific DAG, ready for instruction scheduling.
1929 FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
1930 llvm::CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}