//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to an X86 dag.
//
//===----------------------------------------------------------------------===//
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#define DEBUG_TYPE "x86-isel"

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//
/// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
/// SDValue's instead of register numbers for the leaves of the matched
/// tree.
struct X86ISelAddressMode {
  // This is really a union, discriminated by BaseType!
  const GlobalValue *GV;
  const BlockAddress *BlockAddr;
  unsigned Align;             // CP alignment.
  unsigned char SymbolFlags;  // X86II::MO_*
  X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
        JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) {
  }

  bool hasSymbolicDisplacement() const {
    return GV != nullptr || CP != nullptr || ES != nullptr ||
           JT != -1 || BlockAddr != nullptr;
  }

  bool hasBaseOrIndexReg() const {
    return BaseType == FrameIndexBase ||
           IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
  }

  /// isRIPRelative - Return true if this addressing mode is already RIP
  /// relative.
  bool isRIPRelative() const {
    if (BaseType != RegBase) return false;
    if (RegisterSDNode *RegNode =
          dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
      return RegNode->getReg() == X86::RIP;
    return false;
  }

  void setBaseReg(SDValue Reg) {
    BaseType = RegBase;
    Base_Reg = Reg;
  }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() {
    dbgs() << "X86ISelAddressMode " << this << '\n';
    dbgs() << "Base_Reg ";
    if (Base_Reg.getNode())
      Base_Reg.getNode()->dump();
    dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
           << " Scale " << Scale << '\n'
           << "IndexReg ";
    if (IndexReg.getNode())
      IndexReg.getNode()->dump();
    dbgs() << " Disp " << Disp << '\n';
    dbgs() << " JT" << JT << " Align" << Align << '\n';
  }
#endif
};
//===--------------------------------------------------------------------===//
/// ISel - X86 specific code to select X86 machine instructions for
/// SelectionDAG operations.
///
class X86DAGToDAGISel final : public SelectionDAGISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;
  /// OptForSize - If true, selector should try to optimize for code size
  /// instead of performance.
  bool OptForSize;

public:
  explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

  const char *getPassName() const override {
    return "X86 DAG->DAG Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Reset the subtarget each time through.
    Subtarget = &TM.getSubtarget<X86Subtarget>();
    SelectionDAGISel::runOnMachineFunction(MF);
    return true;
  }
  void EmitFunctionEntryCode() override;

  bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;

  void PreprocessISelDAG() override;
  inline bool immSext8(SDNode *N) const {
    return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
  }

  // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // sign extended field.
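  // For example, 0xFFFFFFFF80000000 passes (it is INT32_MIN sign-extended to
  // 64 bits), while 0x0000000080000000 does not.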
  inline bool i64immSExt32(SDNode *N) const {
    uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
    return (int64_t)v == (int32_t)v;
  }
  // Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"
private:
  SDNode *Select(SDNode *N) override;
  SDNode *SelectGather(SDNode *N, unsigned Opc);
  SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
  SDNode *SelectAtomicLoadArith(SDNode *Node, MVT NVT);

  bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
  bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
  bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
  bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
  bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                               unsigned Depth);
  bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
  bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                  SDValue &Scale, SDValue &Index, SDValue &Disp,
                  SDValue &Segment);
  bool SelectMOV64Imm32(SDValue N, SDValue &Imm);
  bool SelectLEAAddr(SDValue N, SDValue &Base,
                     SDValue &Scale, SDValue &Index, SDValue &Disp,
                     SDValue &Segment);
  bool SelectLEA64_32Addr(SDValue N, SDValue &Base,
                          SDValue &Scale, SDValue &Index, SDValue &Disp,
                          SDValue &Segment);
  bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
                         SDValue &Scale, SDValue &Index, SDValue &Disp,
                         SDValue &Segment);
  bool SelectScalarSSELoad(SDNode *Root, SDValue N,
                           SDValue &Base, SDValue &Scale,
                           SDValue &Index, SDValue &Disp,
                           SDValue &Segment,
                           SDValue &NodeWithChain);
  bool TryFoldLoad(SDNode *P, SDValue N,
                   SDValue &Base, SDValue &Scale,
                   SDValue &Index, SDValue &Disp,
                   SDValue &Segment);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    char ConstraintCode,
                                    std::vector<SDValue> &OutOps) override;

  void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

  inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
    Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
           CurDAG->getTargetFrameIndex(AM.Base_FrameIndex,
                                       getTargetLowering()->getPointerTy()) :
           AM.Base_Reg;
    Scale = getI8Imm(AM.Scale);
    Index = AM.IndexReg;
    // These are 32-bit even in 64-bit mode since RIP relative offset
    // is 32-bit.
    if (AM.GV)
      Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(), MVT::i32, AM.Disp,
                                            AM.SymbolFlags);
    else if (AM.CP)
      Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                           AM.Align, AM.Disp, AM.SymbolFlags);
    else if (AM.ES) {
      assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
      Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
    } else if (AM.JT != -1) {
      assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
      Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
    } else if (AM.BlockAddr)
      Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                           AM.SymbolFlags);
    else
      Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

    if (AM.Segment.getNode())
      Segment = AM.Segment;
    else
      Segment = CurDAG->getRegister(0, MVT::i32);
  }
  /// getI8Imm - Return a target constant with the specified value, of type
  /// i8.
  inline SDValue getI8Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i8);
  }

  /// getI32Imm - Return a target constant with the specified value, of type
  /// i32.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }
  /// getGlobalBaseReg - Return an SDNode that returns the value of
  /// the global base register. Output instructions required to
  /// initialize the global base register, if necessary.
  ///
  SDNode *getGlobalBaseReg();
  /// getTargetMachine - Return a reference to the TargetMachine, casted
  /// to the target-specific type.
  const X86TargetMachine &getTargetMachine() const {
    return static_cast<const X86TargetMachine &>(TM);
  }

  /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
  /// to the target-specific type.
  const X86InstrInfo *getInstrInfo() const {
    return getTargetMachine().getSubtargetImpl()->getInstrInfo();
  }
};

bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size, e.g.
      //   movl 4(%esp), %eax
      //   addl $10, %eax
      // vs.
      //   movl $10, %eax
      //   addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead:
      //   movl %gs:0, %eax
      //   leal i@NTPOFF(%eax), %eax
      // instead of
      //   movl $i@NTPOFF, %eax
      //   addl %gs:0, %eax
      // If the block also has an access to a second TLS address, this saves
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
  }

  return true;
}

/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  unsigned NumOps = Call.getNode()->getNumOperands();
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = NumOps; i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}

/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain, this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD || LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target doesn't favor register-indirect
        // calls.
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->callRegIndirect()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          // Only do this if the load can be folded into TC_RETURN.
          (Subtarget->is64Bit() ||
           getTargetMachine().getRelocationModel() != Reloc::PIC_)))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to be store and
    // load to the stack. This is a gross hack. We would like to simply mark
    // these as being illegal, but when we do that, legalize produces these when
    // it expands calls, then expands these in the same legalize pass. We would
    // like dag combine to be able to hack on these between the call expansion
    // and the node legalization. As such this pass basically does "really
    // late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    MVT SrcVT = N->getOperand(0).getSimpleValueType();
    MVT DstVT = N->getSimpleValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    const X86TargetLowering *X86Lowering =
        static_cast<const X86TargetLowering *>(getTargetLowering());
    bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    MVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    SDLoc dl(N);

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
  if (Subtarget->isTargetCygMing()) {
    unsigned CallOp =
      Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
    BuildMI(BB, DebugLoc(),
            TII->get(CallOp)).addExternalSymbol("__main");
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
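  // For example, a frame object at offset 2^30 combined with an explicit
  // displacement of 2^30 would overflow the signed 32-bit field if each
  // component were allowed the full 32 bits; capping the check at 31 bits
  // keeps the sum representable.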
  return isInt<31>(Val);
}

bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}

bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
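  // Concretely, "movl %gs:0, %eax" on i386 (or "movq %fs:0, %rax" on x86-64)
  // yields the address of the thread's TLS block itself, which is why a
  // zero-displacement load from these segments can be matched directly here.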
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
        Subtarget->isTargetLinux())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode. These wrap things that will resolve down into a
/// symbol reference. If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses. We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    // Base and index reg must be 0 in order to use %rip as base.
    if (AM.hasBaseOrIndexReg())
      return true;
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.BlockAddr = BA->getBlockAddress();
      AM.SymbolFlags = BA->getTargetFlags();
      if (FoldOffsetIntoAddress(BA->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else
      llvm_unreachable("Unhandled symbol reference node.");

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -mcmodel=small mode. In 64-bit
  // mode, this only applies to a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      M == CodeModel::Small || M == CodeModel::Kernel) {
    assert(N.getOpcode() != X86ISD::WrapperRIP &&
           "RIP-relative addressing already handled");
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      AM.BlockAddr = BA->getBlockAddress();
      AM.Disp += BA->getOffset();
      AM.SymbolFlags = BA->getTargetFlags();
    } else
      llvm_unreachable("Unhandled symbol reference node.");
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr &&
      AM.IndexReg.getNode() == nullptr &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}

// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    DAG.RepositionNode(Pos.getNode(), N.getNode());
    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
  }
}

// Transform "(X >> (8-C1)) & C2" to "((X >> 8) & 0xff)" if safe. This
// allows us to convert the shift and and into an h-register extract and
// a scaled index. Returns false if the simplification is performed.
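// For example, "(x >> 5) & 0x7f8" becomes "((x >> 8) & 0xff) << 3": the byte
// at bits 8-15 of x can come from an h-register, and the trailing <<3 is
// folded into the addressing mode as a scale of 8.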
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue Eight = DAG.getConstant(8, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, Eight);
  InsertDAGNode(DAG, N, Srl);
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, And);
  InsertDAGNode(DAG, N, ShlCount);
  InsertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}

// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
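// For example, "(x << 2) & 0x3fc" becomes "(x & 0xff) << 2"; the shift left
// by 2 is then representable in the addressing mode as a scale of 4.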
static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, NewAnd);
  InsertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}

// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %rcx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
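// For example, (and (srl x, 6), 0x3fc) carries the post-shift mask 0x3fc;
// assuming the masked-out high bits of x are known zero, it is rewritten
// below as (shl (srl x, 8), 2), i.e. index = x >> 8 with scale 4.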
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = countLeadingZeros(Mask);
  unsigned MaskTZ = countTrailingZeros(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that the mask is a contiguous run of bits.
  if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
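  // For example, Mask == 0x0ff0 passes this check: 8 trailing ones after
  // shifting out 4 trailing zeros, plus 52 leading zeros, sums to 64.
  // Mask == 0x0f0f fails, since its set bits are not contiguous.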

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  MaskLZ -= (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
                          X.getOperand(0).getSimpleValueType().getSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits =
    APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.computeKnownBits(X, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  MVT VT = N.getSimpleValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
    InsertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  SDLoc DL(N);
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewSRLAmt);
  InsertDAGNode(DAG, N, NewSRL);
  InsertDAGNode(DAG, N, NewSHLAmt);
  InsertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}

bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  SDLoc dl(N);
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
          if (!FoldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;

  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getSimpleValueType().getSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it to the transform.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        AM.IndexReg.getNode() == nullptr) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (FoldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address and
    // the index field with the index field unused, use -B as the index.
    // This is a win if A has multiple parts that can be folded into
    // the address. Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction, however
    // it costs an additional mov if the index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    InsertDAGNode(*CurDAG, N, Zero);
    InsertDAGNode(*CurDAG, N, Neg);
    return false;
  }

  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base_Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      N = Handle.getValue();
      AM.Base_Reg = N.getOperand(0);
      AM.IndexReg = N.getOperand(1);
      return false;
    }
    N = Handle.getValue();
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (CurDAG->isBaseWithConstantOffset(N)) {
      X86ISelAddressMode Backup = AM;
      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));

      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
        return false;
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
    SDValue X = Shift.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getSimpleValueType().getSizeInBits() > 64) break;

    if (!isa<ConstantSDNode>(N.getOperand(1)))
      break;
    uint64_t Mask = N.getConstantOperandVal(1);

    // Try to fold the mask and shift into an extract and scale.
    if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to fold the mask and shift directly into the scale.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
      return false;
    break;
  }
  }

  return MatchAddressBase(N, AM);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (!AM.IndexReg.getNode()) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}

/// SelectAddr - Returns true if it is able to pattern match an addressing
/// mode. It returns the operands which make up the maximal addressing mode it
/// can match by reference.
///
/// Parent is the parent node of the addr operand that is being matched. It
/// is always a load, store, atomic node, or null. It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;
  if (Parent &&
      // This list of opcodes are all the nodes that have an "addr:$ptr" operand
      // that are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
      Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
      Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  }

  if (MatchAddress(N, AM))
    return false;

  MVT VT = N.getSimpleValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   chain output.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        PatternNodeWithChain.hasOneUse() &&
        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse() &&
      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    PatternNodeWithChain = SDValue(LD, 0);
    return true;
  }
  return false;
}

bool X86DAGToDAGISel::SelectMOV64Imm32(SDValue N, SDValue &Imm) {
  if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    uint64_t ImmVal = CN->getZExtValue();
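    // MOV32ri64 materializes only the low 32 bits and zero-extends, so only
    // immediates that survive a 32-bit round-trip (e.g. not 0x100000000)
    // qualify here.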
    if ((uint32_t)ImmVal != (uint64_t)ImmVal)
      return false;

    Imm = CurDAG->getTargetConstant(ImmVal, MVT::i64);
    return true;
  }

  // In static codegen with small code model, we can get the address of a label
  // into a register with 'movl'. TableGen has already made sure we're looking
  // at a label of some kind.
  assert(N->getOpcode() == X86ISD::Wrapper &&
         "Unexpected node type for MOV32ri64");
  N = N.getOperand(0);

  if (N->getOpcode() != ISD::TargetConstantPool &&
      N->getOpcode() != ISD::TargetJumpTable &&
      N->getOpcode() != ISD::TargetGlobalAddress &&
      N->getOpcode() != ISD::TargetExternalSymbol &&
      N->getOpcode() != ISD::TargetBlockAddress)
    return false;

  Imm = N;
  return TM.getCodeModel() == CodeModel::Small;
}

bool X86DAGToDAGISel::SelectLEA64_32Addr(SDValue N, SDValue &Base,
                                         SDValue &Scale, SDValue &Index,
                                         SDValue &Disp, SDValue &Segment) {
  if (!SelectLEAAddr(N, Base, Scale, Index, Disp, Segment))
    return false;

  SDLoc DL(N);
  RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
  if (RN && RN->getReg() == 0)
    Base = CurDAG->getRegister(0, MVT::i64);
  else if (Base.getValueType() == MVT::i32 && !dyn_cast<FrameIndexSDNode>(N)) {
    // Base could already be %rip, particularly in the x32 ABI.
    Base = SDValue(CurDAG->getMachineNode(
                       TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
                       CurDAG->getTargetConstant(0, MVT::i64),
                       Base,
                       CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
                   0);
  }

  RN = dyn_cast<RegisterSDNode>(Index);
  if (RN && RN->getReg() == 0)
    Index = CurDAG->getRegister(0, MVT::i64);
  else {
    assert(Index.getValueType() == MVT::i32 &&
           "Expect to be extending 32-bit registers for use in LEA");
    Index = SDValue(CurDAG->getMachineNode(
                        TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
                        CurDAG->getTargetConstant(0, MVT::i64),
                        Index,
                        CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
                    0);
  }

  return true;
}

/// SelectLEAAddr - It calls SelectAddr and determines if the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert(T == AM.Segment);
  AM.Segment = Copy;

  MVT VT = N.getSimpleValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (Complexity == 1 && AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
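  // For example, "leal sym(%eax), %eax" instead of "addl $sym, %eax" leaves
  // EFLAGS untouched and is three-address, which is one reason a symbolic
  // displacement bumps Complexity below.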
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg,
                             getTargetLowering()->getPointerTy()).getNode();
}

SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);

  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return nullptr;
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
  SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node),
                                           MVT::i32, MVT::i32, MVT::Other, Ops);
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
}

/// Atomic opcode table
///
static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
  X86::LOCK_ADD64mi32,
  X86::LOCK_SUB64mi32,
  X86::LOCK_AND64mi32,
  X86::LOCK_XOR64mi32,
};

// Return the target constant operand for atomic-load-op and do simple
// translations, such as from atomic-load-add to lock-sub. The return value is
// one of the following 3 cases:
// + target-constant, the operand could be supported as a target constant.
// + empty, the operand is not needed any more with the new op selected.
// + non-empty, otherwise.
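// For example, (atomic-load-add ptr, -1) is selected as a LOCK_DEC form with
// an empty operand, while (atomic-load-add ptr, -5) becomes a LOCK_SUB form
// with the target constant 5.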
static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
                                                SDLoc dl,
                                                enum AtomicOpc &Op, MVT NVT,
                                                SDValue Val) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) {
    int64_t CNVal = CN->getSExtValue();
    // Quit if not 32-bit imm.
    if ((int32_t)CNVal != CNVal)
      return Val;
    // For atomic-load-add, we could do some optimizations.
    if (Op == ADD) {
      // Translate to INC/DEC if ADD by 1 or -1.
      if ((CNVal == 1) || (CNVal == -1)) {
        Op = (CNVal == 1) ? INC : DEC;
        // No more constant operand after being translated into INC/DEC.
        return SDValue();
      }
      // Translate to SUB if ADD by negative value.
      if (CNVal < 0) {
        Op = SUB;
        CNVal = -CNVal;
      }
    }
    return CurDAG->getTargetConstant(CNVal, NVT);
  }

  // If the value operand is single-used, try to optimize it.
  if (Op == ADD && Val.hasOneUse()) {
    // Translate (atomic-load-add ptr (sub 0 x)) back to (lock-sub x).
    if (Val.getOpcode() == ISD::SUB && X86::isZeroNode(Val.getOperand(0))) {
      Op = SUB;
      return Val.getOperand(1);
    }
    // A special case for i16, which needs truncating as, in most cases, it's
    // promoted to i32. We will translate
    // (atomic-load-add (truncate (sub 0 x))) to (lock-sub (EXTRACT_SUBREG x))
    if (Val.getOpcode() == ISD::TRUNCATE && NVT == MVT::i16 &&
        Val.getOperand(0).getOpcode() == ISD::SUB &&
        X86::isZeroNode(Val.getOperand(0).getOperand(0))) {
      Op = SUB;
      Val = Val.getOperand(0);
      return CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, NVT,
                                            Val.getOperand(1));
    }
  }

  return Val;
}

SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, MVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return nullptr;

  SDLoc dl(Node);

  // Optimize common patterns for __sync_or_and_fetch and similar arith
  // operations where the result is not used. This allows us to use the "lock"
  // version of the arithmetic instruction.
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return nullptr;

  // Which index into the table.
  enum AtomicOpc Op;
  switch (Node->getOpcode()) {
  default: return nullptr;
  case ISD::ATOMIC_LOAD_OR:  Op = OR;  break;
  case ISD::ATOMIC_LOAD_AND: Op = AND; break;
  case ISD::ATOMIC_LOAD_XOR: Op = XOR; break;
  case ISD::ATOMIC_LOAD_ADD: Op = ADD; break;
  }

  Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val);
  bool isUnOp = !Val.getNode();
  bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant);

  unsigned Opc = 0;
  switch (NVT.SimpleTy) {
  default: return nullptr;
  case MVT::i8:
    if (isCN)
      Opc = AtomicOpcTbl[Op][ConstantI8];
    else
      Opc = AtomicOpcTbl[Op][I8];
    break;
  case MVT::i16:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI16];
      else
        Opc = AtomicOpcTbl[Op][ConstantI16];
    } else
      Opc = AtomicOpcTbl[Op][I16];
    break;
  case MVT::i32:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI32];
      else
        Opc = AtomicOpcTbl[Op][ConstantI32];
    } else
      Opc = AtomicOpcTbl[Op][I32];
    break;
  case MVT::i64:
    Opc = AtomicOpcTbl[Op][I64];
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI64];
      else if (i64immSExt32(Val.getNode()))
        Opc = AtomicOpcTbl[Op][ConstantI64];
    }
    break;
  }

  assert(Opc != 0 && "Invalid arith lock transform!");

  SDValue Ret;
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  if (isUnOp) {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
    Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0);
  } else {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
    Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0);
  }
  cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
  SDValue RetVals[] = { Undef, Ret };
  return CurDAG->getMergeValues(RetVals, dl).getNode();
}

/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
/// any uses which require the SF or OF bits to be accurate.
static bool HasNoSignedComparisonUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); UI != UE; ++UI) {
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      return false;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
        X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
1884 // These comparisons don't treat the most significant bit specially.
1885 case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
1886 case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
1887 case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
1888 case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
1889 case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
1890 case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
1891 case X86::CMOVA16rr: case X86::CMOVA16rm:
1892 case X86::CMOVA32rr: case X86::CMOVA32rm:
1893 case X86::CMOVA64rr: case X86::CMOVA64rm:
1894 case X86::CMOVAE16rr: case X86::CMOVAE16rm:
1895 case X86::CMOVAE32rr: case X86::CMOVAE32rm:
1896 case X86::CMOVAE64rr: case X86::CMOVAE64rm:
1897 case X86::CMOVB16rr: case X86::CMOVB16rm:
1898 case X86::CMOVB32rr: case X86::CMOVB32rm:
1899 case X86::CMOVB64rr: case X86::CMOVB64rm:
1900 case X86::CMOVBE16rr: case X86::CMOVBE16rm:
1901 case X86::CMOVBE32rr: case X86::CMOVBE32rm:
1902 case X86::CMOVBE64rr: case X86::CMOVBE64rm:
1903 case X86::CMOVE16rr: case X86::CMOVE16rm:
1904 case X86::CMOVE32rr: case X86::CMOVE32rm:
1905 case X86::CMOVE64rr: case X86::CMOVE64rm:
1906 case X86::CMOVNE16rr: case X86::CMOVNE16rm:
1907 case X86::CMOVNE32rr: case X86::CMOVNE32rm:
1908 case X86::CMOVNE64rr: case X86::CMOVNE64rm:
1909 case X86::CMOVNP16rr: case X86::CMOVNP16rm:
1910 case X86::CMOVNP32rr: case X86::CMOVNP32rm:
1911 case X86::CMOVNP64rr: case X86::CMOVNP64rm:
1912 case X86::CMOVP16rr: case X86::CMOVP16rm:
1913 case X86::CMOVP32rr: case X86::CMOVP32rm:
1914 case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.
      default: return false;
      }
    }
  }
  return true;
}

/// isLoadIncOrDecStore - Check whether or not the chain ending in StoreNode
/// is suitable for doing the {load; increment or decrement; store} to modify
/// the value.
static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
                                SDValue StoredVal, SelectionDAG *CurDAG,
                                LoadSDNode* &LoadNode, SDValue &InputChain) {
  // Is the value stored the result of a DEC or INC?
  if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;

  // Is the stored value result 0 of the load?
  if (StoredVal.getResNo() != 0) return false;

  // Are there other uses of the loaded value than the inc or dec?
  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;

  // Is the store non-extending and non-indexed?
  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
    return false;
  SDValue Load = StoredVal->getOperand(0);
  // Is the stored value a non-extending and non-indexed load?
  if (!ISD::isNormalLoad(Load.getNode())) return false;

  // Return LoadNode by reference.
  LoadNode = cast<LoadSDNode>(Load);
  // Is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
  EVT LdVT = LoadNode->getMemoryVT();
  if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
      LdVT != MVT::i8)
    return false;

  // Is store the only read of the loaded value?
  if (!Load.hasOneUse())
    return false;

  // Is the address of the store the same as the load?
  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
      LoadNode->getOffset() != StoreNode->getOffset())
    return false;
  // Check if the chain is produced by the load or is a TokenFactor with
  // the load output chain as an operand. Return InputChain by reference.
  SDValue Chain = StoreNode->getChain();

  bool ChainCheck = false;
  if (Chain == Load.getValue(1)) {
    ChainCheck = true;
    InputChain = LoadNode->getChain();
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    SmallVector<SDValue, 4> ChainOps;
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
      SDValue Op = Chain.getOperand(i);
      if (Op == Load.getValue(1)) {
        ChainCheck = true;
        continue;
      }
      // Make sure using Op as part of the chain would not cause a cycle here.
      // In theory, we could check whether the chain node is a predecessor of
      // the load. But that can be very expensive. Instead visit the uses and
      // make sure they all have smaller node id than the load.
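      // Node ids are assigned in topological order, so this is a cheap,
      // conservative approximation of the real predecessor check.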
      int LoadId = LoadNode->getNodeId();
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        if (UI.getUse().getResNo() != 0)
          continue;
        if (UI->getNodeId() > LoadId)
          return false;
      }
      ChainOps.push_back(Op);
    }

    // Make a new TokenFactor with all the other input chains except
    // the load.
    InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
                                 MVT::Other, ChainOps);
  }
  if (!ChainCheck)
    return false;

  return true;
}

/// getFusedLdStOpcode - Get the appropriate X86 opcode for an in-memory
/// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC.
static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
  if (Opc == X86ISD::DEC) {
    if (LdVT == MVT::i64) return X86::DEC64m;
    if (LdVT == MVT::i32) return X86::DEC32m;
    if (LdVT == MVT::i16) return X86::DEC16m;
    if (LdVT == MVT::i8)  return X86::DEC8m;
  } else {
    assert(Opc == X86ISD::INC && "unrecognized opcode");
    if (LdVT == MVT::i64) return X86::INC64m;
    if (LdVT == MVT::i32) return X86::INC32m;
    if (LdVT == MVT::i16) return X86::INC16m;
    if (LdVT == MVT::i8)  return X86::INC8m;
  }
  llvm_unreachable("unrecognized size for LdVT");
}

/// SelectGather - Customized ISel for GATHER operations.
///
SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
  // Operands of Gather: VSrc, Base, VIdx, VMask, Scale
  SDValue Chain = Node->getOperand(0);
  SDValue VSrc = Node->getOperand(2);
  SDValue Base = Node->getOperand(3);
  SDValue VIdx = Node->getOperand(4);
  SDValue VMask = Node->getOperand(5);
  ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
  if (!Scale)
    return nullptr;

  SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
                                   MVT::Other);

  // Memory Operands: Base, Scale, Index, Disp, Segment
  SDValue Disp = CurDAG->getTargetConstant(0, MVT::i32);
  SDValue Segment = CurDAG->getRegister(0, MVT::i32);
  const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx,
                          Disp, Segment, VMask, Chain};
  SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node), VTs, Ops);
  // Node has 2 outputs: VDst and MVT::Other.
  // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
  // We replace VDst of Node with VDst of ResNode, and Other of Node with
  // Other of ResNode.
  ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
  ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2));
  return ResNode;
}

SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
  MVT NVT = Node->getSimpleValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  SDLoc dl(Node);

  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== ";  Node->dump(CurDAG); dbgs() << '\n');
    Node->setNodeId(-1);
    return nullptr;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default: break;
    case Intrinsic::x86_avx2_gather_d_pd:
    case Intrinsic::x86_avx2_gather_d_pd_256:
    case Intrinsic::x86_avx2_gather_q_pd:
    case Intrinsic::x86_avx2_gather_q_pd_256:
    case Intrinsic::x86_avx2_gather_d_ps:
    case Intrinsic::x86_avx2_gather_d_ps_256:
    case Intrinsic::x86_avx2_gather_q_ps:
    case Intrinsic::x86_avx2_gather_q_ps_256:
    case Intrinsic::x86_avx2_gather_d_q:
    case Intrinsic::x86_avx2_gather_d_q_256:
    case Intrinsic::x86_avx2_gather_q_q:
    case Intrinsic::x86_avx2_gather_q_q_256:
    case Intrinsic::x86_avx2_gather_d_d:
    case Intrinsic::x86_avx2_gather_d_d_256:
    case Intrinsic::x86_avx2_gather_q_d:
    case Intrinsic::x86_avx2_gather_q_d_256: {
      if (!Subtarget->hasAVX2())
        break;
      unsigned Opc;
      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic");
      case Intrinsic::x86_avx2_gather_d_pd:     Opc = X86::VGATHERDPDrm;  break;
      case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break;
      case Intrinsic::x86_avx2_gather_q_pd:     Opc = X86::VGATHERQPDrm;  break;
      case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break;
      case Intrinsic::x86_avx2_gather_d_ps:     Opc = X86::VGATHERDPSrm;  break;
      case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break;
      case Intrinsic::x86_avx2_gather_q_ps:     Opc = X86::VGATHERQPSrm;  break;
      case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break;
      case Intrinsic::x86_avx2_gather_d_q:      Opc = X86::VPGATHERDQrm;  break;
      case Intrinsic::x86_avx2_gather_d_q_256:  Opc = X86::VPGATHERDQYrm; break;
      case Intrinsic::x86_avx2_gather_q_q:      Opc = X86::VPGATHERQQrm;  break;
      case Intrinsic::x86_avx2_gather_q_q_256:  Opc = X86::VPGATHERQQYrm; break;
      case Intrinsic::x86_avx2_gather_d_d:      Opc = X86::VPGATHERDDrm;  break;
      case Intrinsic::x86_avx2_gather_d_d_256:  Opc = X86::VPGATHERDDYrm; break;
      case Intrinsic::x86_avx2_gather_q_d:      Opc = X86::VPGATHERQDrm;  break;
      case Intrinsic::x86_avx2_gather_q_d_256:  Opc = X86::VPGATHERQDYrm; break;
      }
      SDNode *RetVal = SelectGather(Node, Opc);
      if (RetVal)
        // We already called ReplaceUses inside SelectGather.
        return nullptr;
      break;
    }
    }
    break;
  }

  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    // For operations of the form (x << C1) op C2, check if we can use a
    // smaller encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
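    // For example, "(x << 8) & 0x1f00" can become "(x & 0x1f) << 8": the
    // immediate $0x1f fits the short imm8 encoding, while $0x1f00 does not.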
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
      break;

    // i8 is unshrinkable, i16 should be promoted to i32.
    if (NVT != MVT::i32 && NVT != MVT::i64)
      break;

    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
    ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!Cst || !ShlCst)
      break;

    int64_t Val = Cst->getSExtValue();
    uint64_t ShlVal = ShlCst->getZExtValue();

    // Make sure that we don't change the operation by removing bits.
    // This only matters for OR and XOR, AND is unaffected.
    uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1;
    if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
      break;

    unsigned ShlOp, Op;
    MVT CstVT = NVT;

    // Check the minimum bitwidth for the new constant.
    // TODO: AND32ri is the same as AND64ri32 with zext imm.
    // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
    // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
    if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
      CstVT = MVT::i8;
    else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
      CstVT = MVT::i32;

    // Bail if there is no smaller encoding.
    if (NVT == CstVT)
      break;

    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i32:
      assert(CstVT == MVT::i8);
      ShlOp = X86::SHL32ri;

      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = X86::AND32ri8; break;
      case ISD::OR:  Op =  X86::OR32ri8; break;
      case ISD::XOR: Op = X86::XOR32ri8; break;
      }
      break;
    case MVT::i64:
      assert(CstVT == MVT::i8 || CstVT == MVT::i32);
      ShlOp = X86::SHL64ri;

      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
      case ISD::OR:  Op = CstVT==MVT::i8?  X86::OR64ri8 :  X86::OR64ri32; break;
      case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
      }
      break;
    }

    // Emit the smaller op and the shift.
    SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
    SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
    return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
                                N0->getOperand(1));
  }
  case X86ISD::UMUL: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
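
    // x86 MUL takes only one explicit operand; the other factor and the low
    // half of the result implicitly live in AL/AX/EAX/RAX, so pick the low
    // register for this width.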
    unsigned LoReg;
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  Opc = X86::MUL8r;  break;
    case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
    case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
    SDValue Ops[] = {N1, InFlag};
    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);

    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
    return nullptr;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    bool hasBMI2 = Subtarget->hasBMI2();
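    // On BMI2 targets, prefer MULX for the unsigned case: it takes its
    // implicit source in (E/R)DX, writes both halves to explicit registers,
    // and does not clobber EFLAGS.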
    if (!isSigned) {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r;
                     MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break;
      case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r;
                     MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break;
      }
    } else {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned SrcReg, LoReg, HiReg;
    switch (Opc) {
    default: llvm_unreachable("Unknown MUL opcode!");
    case X86::IMUL8r:  case X86::MUL8r:
      SrcReg = LoReg = X86::AL;  HiReg = X86::AH;  break;
    case X86::IMUL16r: case X86::MUL16r:
      SrcReg = LoReg = X86::AX;  HiReg = X86::DX;  break;
    case X86::IMUL32r: case X86::MUL32r:
      SrcReg = LoReg = X86::EAX; HiReg = X86::EDX; break;
    case X86::IMUL64r: case X86::MUL64r:
      SrcReg = LoReg = X86::RAX; HiReg = X86::RDX; break;
    case X86::MULX32rr:
      SrcReg = X86::EDX; LoReg = HiReg = 0; break;
    case X86::MULX64rr:
      SrcReg = X86::RDX; LoReg = HiReg = 0; break;
    }
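    // For the MULX forms both halves come back as explicit results, so there
    // are no fixed result registers to copy from (LoReg/HiReg stay 0).
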
    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
                                          N0, SDValue()).getValue(1);
    SDValue ResHi, ResLo;

    if (foldedLoad) {
      SDValue Chain;
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
        SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
        ResHi = SDValue(CNode, 0);
        ResLo = SDValue(CNode, 1);
        Chain = SDValue(CNode, 2);
        InFlag = SDValue(CNode, 3);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
        Chain = SDValue(CNode, 0);
        InFlag = SDValue(CNode, 1);
      }

      // Update the chain.
      ReplaceUses(N1.getValue(1), Chain);
    } else {
      SDValue Ops[] = { N1, InFlag };
      if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
        SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
        ResHi = SDValue(CNode, 0);
        ResLo = SDValue(CNode, 1);
        InFlag = SDValue(CNode, 2);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
        InFlag = SDValue(CNode, 0);
      }
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
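    // An instruction with a REX prefix cannot encode AH (or BH/CH/DH), so in
    // 64-bit mode copy all of AX out and extract or shift the byte we need.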
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);
      // Get the low part if needed. Don't use getCopyFromReg for aliasing
      // registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX down 8 bits.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                 CurDAG->getTargetConstant(8, MVT::i8)), 0);
      // Then truncate it down to i8.
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      if (!ResLo.getNode()) {
        assert(LoReg && "Register for low half is not defined!");
        ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT,
                                       InFlag);
        InFlag = ResLo.getValue(2);
      }
      ReplaceUses(SDValue(Node, 0), ResLo);
      DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the high half of the result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      if (!ResHi.getNode()) {
        assert(HiReg && "Register for high half is not defined!");
        ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT,
                                       InFlag);
        InFlag = ResHi.getValue(2);
      }
      ReplaceUses(SDValue(Node, 1), ResHi);
      DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    return nullptr;
  }

  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
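    // x86 DIV/IDIV divide the double-width value in (E/R)DX:(E/R)AX (AH:AL
    // for i8) by the explicit operand; the quotient lands in the low register
    // and the remainder in the high one, so the dividend is set up below.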
    if (!isSigned) {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg, ClrReg;
    unsigned SExtOpcode;
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      SExtOpcode = X86::CQO;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
                                         MVT::Other, Ops), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0);
        Chain = CurDAG->getEntryNode();
      }
      Chain  = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0);
        switch (NVT.SimpleTy) {
        case MVT::i16:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
                          CurDAG->getTargetConstant(X86::sub_16bit, MVT::i32)),
                      0);
          break;
        case MVT::i32:
          break;
        case MVT::i64:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
                          CurDAG->getTargetConstant(0, MVT::i64), ClrNode,
                          CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
                      0);
          break;
        default:
          llvm_unreachable("Unexpected division source");
        }

        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    // Shift it down 8 bits.
    //
    // The current assumption of the register allocator is that isel
    // won't generate explicit references to the GPR8_NOREX registers. If
    // the allocator and/or the backend get enhanced to be more robust in
    // that regard, this can be, and should be, removed.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);

      // If we also need AL (the quotient), get it by extracting a subreg from
      // Result. The fast register allocator does not like multiple CopyFromReg
      // nodes using aliasing registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX right by 8 bits instead of using AH.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                CurDAG->getTargetConstant(8, MVT::i8)),
                       0);
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    return nullptr;
  }

  case X86ISD::CMP:
  case X86ISD::SUB: {
    // Sometimes a SUB is used to perform comparison.
    if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0))
      // This node is not a CMP.
      break;
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
        HasNoSignedComparisonUses(Node))
      // Look past the truncate if CMP is the only use of it.
      N0 = N0.getOperand(0);
    if ((N0.getNode()->getOpcode() == ISD::AND ||
         (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
        N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;

      // For example, convert "testl %eax, $8" to "testb %al, $8"
      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
          (!(C->getZExtValue() & 0x80) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // On x86-32, only the ABCD registers have 8-bit subregisters.
        if (!Subtarget->is64Bit()) {
          const TargetRegisterClass *TRC;
          switch (N0.getSimpleValueType().SimpleTy) {
          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC), 0);
        }

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }

      // For example, "testl %eax, $2048" to "testb %ah, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Put the value in an ABCD register.
        const TargetRegisterClass *TRC;
        switch (N0.getSimpleValueType().SimpleTy) {
        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
        default: llvm_unreachable("Unsupported TEST operand type!");
        }
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                             Reg.getValueType(), Reg, RC), 0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                        MVT::i8, Reg);

        // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only
        // target GR8_NOREX registers, so make sure the register class is
        // forced.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl,
                                                 MVT::i32, Subreg, ShiftedImm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }

      // For example, "testl %eax, $32776" to "testw %ax, $32776".
      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
          N0.getValueType() != MVT::i16 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }

      // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
          N0.getValueType() == MVT::i64 &&
          (!(C->getZExtValue() & 0x80000000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }
    }
    break;
  }
  case ISD::STORE: {
    // Change a chain of {load; incr or dec; store} of the same value into
    // a simple increment or decrement through memory of that value, if the
    // uses of the modified value and its address are suitable.
    //
    // The DEC64m tablegen pattern is currently not able to match the case
    // where the EFLAGS on the original DEC are used. (This also applies to
    // {INC,DEC}X{64,32,16,8}.)
    //
    // We'll need to improve tablegen to allow flags to be transferred from a
    // node in the pattern to the result node, probably with a new keyword.
    // For example, today we have this:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //     [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //      (implicit EFLAGS)]>;
    // but we may need something like this:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //     [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //      (transferrable EFLAGS)]>;

    StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
    SDValue StoredVal = StoreNode->getOperand(1);
    unsigned Opc = StoredVal->getOpcode();

    LoadSDNode *LoadNode = nullptr;
    SDValue InputChain;
    if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
                             LoadNode, InputChain))
      break;

    SDValue Base, Scale, Index, Disp, Segment;
    if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
                    Base, Scale, Index, Disp, Segment))
      break;
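
    // The fused node both loads and stores, so hand it both memory operands
    // for alias analysis and scheduling.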
    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
    MemOp[0] = StoreNode->getMemOperand();
    MemOp[1] = LoadNode->getMemOperand();
    const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
    EVT LdVT = LoadNode->getMemoryVT();
    unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
    MachineSDNode *Result = CurDAG->getMachineNode(newOpc, SDLoc(Node),
                                                   MVT::i32, MVT::Other, Ops);
    Result->setMemRefs(MemOp, MemOp + 2);

    ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
    ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
    return Result;
  }
  }

  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == nullptr || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << '\n');

  return ResNode;
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}