//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to an X86 dag.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "x86-isel"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Streams.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"

STATISTIC(NumFPKill   , "Number of FP_REG_KILL instructions added");
STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//
/// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
/// SDValue's instead of register numbers for the leaves of the matched
/// tree.
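///
/// The matched form is the general x86 memory operand
///     Base + Scale*Index + Disp
/// where Disp may be an immediate, a global, a constant-pool entry, an
/// external symbol, or a jump-table index. For example, "movl 4(%ebx,%ecx,8),
/// %eax" has Base = EBX, Index = ECX, Scale = 8 and Disp = 4.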
struct X86ISelAddressMode {
  enum { RegBase, FrameIndexBase } BaseType;
  struct {            // This is really a union, discriminated by BaseType!
    SDValue Reg;
    int FrameIndex;
  } Base;
  bool isRIPRel;      // RIP as base?
  unsigned Scale;
  SDValue IndexReg;
  int32_t Disp;
  GlobalValue *GV;
  Constant *CP;
  const char *ES;
  int JT;
  unsigned Align;     // CP alignment.

  X86ISelAddressMode()
    : BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
      GV(0), CP(0), ES(0), JT(-1), Align(0) {}

  void dump() {
    cerr << "X86ISelAddressMode " << this << "\n";
    if (Base.Reg.getNode() != 0) Base.Reg.getNode()->dump();
    cerr << " Base.FrameIndex " << Base.FrameIndex << "\n";
    cerr << "isRIPRel " << isRIPRel << " Scale" << Scale << "\n";
    if (IndexReg.getNode() != 0) IndexReg.getNode()->dump();
    cerr << " Disp " << Disp << "\n";
    cerr << "GV "; if (GV) GV->dump();
    cerr << " CP "; if (CP) CP->dump();
    cerr << "ES "; if (ES) cerr << ES; else cerr << "nul";
    cerr << " JT" << JT << " Align" << Align << "\n";
  }
};
//===--------------------------------------------------------------------===//
/// ISel - X86 specific code to select X86 machine instructions for
/// SelectionDAG operations.
class VISIBILITY_HIDDEN X86DAGToDAGISel : public SelectionDAGISel {
  /// ContainsFPCode - Every instruction we select that uses or defines a FP
  /// register should set this to true.
  bool ContainsFPCode;

  /// TM - Keep a reference to X86TargetMachine.
  X86TargetMachine &TM;

  /// X86Lowering - This object fully describes how to lower LLVM code to an
  /// X86-specific SelectionDAG.
  X86TargetLowering X86Lowering;

  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// CurBB - Current BB being isel'd.
  MachineBasicBlock *CurBB;

  /// OptForSize - If true, selector should try to optimize for code size
  /// instead of performance.
  bool OptForSize;

public:
  X86DAGToDAGISel(X86TargetMachine &tm, bool fast)
    : SelectionDAGISel(X86Lowering, fast),
      ContainsFPCode(false), TM(tm),
      X86Lowering(*TM.getTargetLowering()),
      Subtarget(&TM.getSubtarget<X86Subtarget>()),
      OptForSize(false) {}

  virtual const char *getPassName() const {
    return "X86 DAG->DAG Instruction Selection";
  }

  /// InstructionSelect - This callback is invoked by
  /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
  virtual void InstructionSelect();

  /// InstructionSelectPostProcessing - Post processing of selected and
  /// scheduled basic blocks.
  virtual void InstructionSelectPostProcessing();

  virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);

  virtual bool CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const;

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  SDNode *Select(SDValue N);
  SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);

  bool MatchAddress(SDValue N, X86ISelAddressMode &AM,
                    bool isRoot = true, unsigned Depth = 0);
  bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM,
                        bool isRoot, unsigned Depth);
  bool SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                  SDValue &Scale, SDValue &Index, SDValue &Disp);
  bool SelectLEAAddr(SDValue Op, SDValue N, SDValue &Base,
                     SDValue &Scale, SDValue &Index, SDValue &Disp);
  bool SelectScalarSSELoad(SDValue Op, SDValue Pred,
                           SDValue N, SDValue &Base, SDValue &Scale,
                           SDValue &Index, SDValue &Disp,
                           SDValue &InChain, SDValue &OutChain);
  bool TryFoldLoad(SDValue P, SDValue N,
                   SDValue &Base, SDValue &Scale,
                   SDValue &Index, SDValue &Disp);
  void PreprocessForRMW();
  void PreprocessForFPConvert();

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps);

  void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
  inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp) {
    Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
      CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
      AM.Base.Reg;
    Scale = getI8Imm(AM.Scale);
    Index = AM.IndexReg;
    // These are 32-bit even in 64-bit mode since RIP relative offset
    // is 32-bit.
    if (AM.GV)
      Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp);
    else if (AM.CP)
      Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                           AM.Align, AM.Disp);
    else if (AM.ES)
      Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32);
    else if (AM.JT != -1)
      Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32);
    else
      Disp = getI32Imm(AM.Disp);
  }
  /// getI8Imm - Return a target constant with the specified value, of type
  /// i8.
  inline SDValue getI8Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i8);
  }

  /// getI16Imm - Return a target constant with the specified value, of type
  /// i16.
  inline SDValue getI16Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i16);
  }

  /// getI32Imm - Return a target constant with the specified value, of type
  /// i32.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }

  /// getGlobalBaseReg - Return an SDNode that returns the value of
  /// the global base register. Output instructions required to
  /// initialize the global base register, if necessary.
  SDNode *getGlobalBaseReg();

  /// getTruncateTo8Bit - return an SDNode that implements a subreg based
  /// truncate of the specified operand to i8. This can be done with tablegen,
  /// except that this code uses MVT::Flag in a tricky way that happens to
  /// improve scheduling in some cases.
  SDNode *getTruncateTo8Bit(SDValue N0);
/// findFlagUse - Return use of MVT::Flag value produced by the specified
/// SDNode.
static SDNode *findFlagUse(SDNode *N) {
  unsigned FlagResNo = N->getNumValues()-1;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
      SDValue Op = User->getOperand(i);
      if (Op.getNode() == N && Op.getResNo() == FlagResNo)
        return User;
    }
  }
  return NULL;
}
/// findNonImmUse - Return true by reference in "found" if "Use" is a
/// non-immediate use of "Def". This function recursively traverses up the
/// operand chain, ignoring certain nodes.
static void findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
                          SDNode *Root, bool &found,
                          SmallPtrSet<SDNode*, 16> &Visited) {
  if (found ||
      Use->getNodeId() < Def->getNodeId() ||
      !Visited.insert(Use))
    return;

  for (unsigned i = 0, e = Use->getNumOperands(); !found && i != e; ++i) {
    SDNode *N = Use->getOperand(i).getNode();
    if (N == Def) {
      if (Use == ImmedUse || Use == Root)
        continue;  // We are not looking for immediate use.
      found = true;
      break;
    }

    // Traverse up the operand chain.
    findNonImmUse(N, Def, ImmedUse, Root, found, Visited);
  }
}
/// isNonImmUse - Start searching from Root up the DAG to check if Def can
/// be reached. Return true if that's the case. However, ignore direct uses
/// by ImmedUse (which would be U in the example illustrated in
/// CanBeFoldedBy) and by Root (which can happen in the store case).
/// FIXME: to be really generic, we should allow direct use by any node
/// that is being folded. But realistically since we only fold loads which
/// have one non-chain use, we only need to watch out for load/op/store
/// and load/op/cmp cases where the root (store / cmp) may reach the load via
/// its chain operand.
static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse) {
  SmallPtrSet<SDNode*, 16> Visited;
  bool found = false;
  findNonImmUse(Root, Def, ImmedUse, Root, found, Visited);
  return found;
}
bool X86DAGToDAGISel::CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const {
  if (Fast) return false;

  // If Root use can somehow reach N through a path that doesn't contain
  // U then folding N would create a cycle. e.g. In the following
  // diagram, Root can reach N through X. If N is folded into Root, then
  // X is both a predecessor and a successor of U.
  //
  // If Root produces a flag, then it gets (even more) interesting. Since it
  // will be "glued" together with its flag use in the scheduler, we need to
  // check if it might reach N.
  //
  // If FU (flag use) indirectly reaches N (the load), and Root folds N
  // (call it Fold), then X is a predecessor of FU and a successor of
  // Fold. But since Fold and FU are flagged together, this will create
  // a cycle in the scheduling graph.

  MVT VT = Root->getValueType(Root->getNumValues()-1);
  while (VT == MVT::Flag) {
    SDNode *FU = findFlagUse(Root);
    if (FU == NULL)
      break;
    Root = FU;
    VT = Root->getValueType(Root->getNumValues()-1);
  }

  return !isNonImmUse(Root, N, U);
}
/// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain operand
/// and move load below the TokenFactor. Replace store's chain operand with
/// load's chain result.
static void MoveBelowTokenFactor(SelectionDAG *CurDAG, SDValue Load,
                                 SDValue Store, SDValue TF) {
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = TF.getNode()->getNumOperands(); i != e; ++i)
    if (Load.getNode() == TF.getOperand(i).getNode())
      Ops.push_back(Load.getOperand(0));
    else
      Ops.push_back(TF.getOperand(i));
  CurDAG->UpdateNodeOperands(TF, &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
  CurDAG->UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
                             Store.getOperand(2), Store.getOperand(3));
}
/// isRMWLoad - Return true if N is a load that's part of RMW sub-DAG.
static bool isRMWLoad(SDValue N, SDValue Chain, SDValue Address,
                      SDValue &Load) {
  if (N.getOpcode() == ISD::BIT_CONVERT)
    N = N.getOperand(0);
  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD || LD->isVolatile())
    return false;
  if (LD->getAddressingMode() != ISD::UNINDEXED)
    return false;
  ISD::LoadExtType ExtType = LD->getExtensionType();
  if (ExtType != ISD::NON_EXTLOAD && ExtType != ISD::EXTLOAD)
    return false;
  if (N.hasOneUse() &&
      N.getOperand(1) == Address &&
      N.getNode()->isOperandOf(Chain.getNode())) {
    Load = N;
    return true;
  }
  return false;
}
/// MoveBelowCallSeqStart - Replace CALLSEQ_START operand with load's chain
/// operand and move load below the call's chain operand.
static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
                                  SDValue Call, SDValue Chain) {
  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0, e = Chain.getNode()->getNumOperands(); i != e; ++i)
    if (Load.getNode() == Chain.getOperand(i).getNode())
      Ops.push_back(Load.getOperand(0));
    else
      Ops.push_back(Chain.getOperand(i));
  CurDAG->UpdateNodeOperands(Chain, &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size());
}
/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain) {
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }
  return Chain.getOperand(0).getNode() == Callee.getNode();
}
/// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
/// This is only run if not in -fast mode (aka -O0).
/// This allows the instruction selector to pick more read-modify-write
/// instructions. This is a common case:
///
///     (store (op (load X), Y), X)
///
/// where the load's chain feeds a TokenFactor that the store then uses as its
/// chain operand. The fact the store's chain operand != load's chain will
/// prevent the (store (op (load))) instruction from being selected. We can
/// transform it to:
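///
///   a shape where the TokenFactor takes the load's old chain operand and the
///   store's chain operand becomes the load's chain result. With the chains
///   re-linked this way, the whole (store (op (load X), Y), X) sub-DAG can be
///   selected as a single read-modify-write instruction, e.g.
///   "addl %eax, (%ecx)".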
void X86DAGToDAGISel::PreprocessForRMW() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {
    if (I->getOpcode() == X86ISD::CALL) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      SDValue Chain = I->getOperand(0);
      SDValue Load  = I->getOperand(1);
      if (!isCalleeLoad(Load, Chain))
        continue;
      MoveBelowCallSeqStart(CurDAG, Load, SDValue(I, 0), Chain);
      continue;
    }

    if (!ISD::isNON_TRUNCStore(I))
      continue;
    SDValue Chain = I->getOperand(0);

    if (Chain.getNode()->getOpcode() != ISD::TokenFactor)
      continue;

    SDValue N1 = I->getOperand(1);
    SDValue N2 = I->getOperand(2);
    if ((N1.getValueType().isFloatingPoint() &&
         !N1.getValueType().isVector()) ||
        !N1.hasOneUse())
      continue;

    bool RModW = false;
    SDValue Load;
    unsigned Opcode = N1.getNode()->getOpcode();
    switch (Opcode) {
    case ISD::VECTOR_SHUFFLE: {
      SDValue N10 = N1.getOperand(0);
      SDValue N11 = N1.getOperand(1);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      if (!RModW)
        RModW = isRMWLoad(N11, Chain, N2, Load);
      break;
    }

      SDValue N10 = N1.getOperand(0);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      break;
    }

    if (RModW) {
      MoveBelowTokenFactor(CurDAG, Load, SDValue(I, 0), Chain);
      ++NumLoadMoved;
    }
  }
}
/// PreprocessForFPConvert - Walk over the dag, lowering fpround and fpextend
/// nodes that target the FP stack into a store and load through the stack.
/// This is a gross hack. We would like to simply mark these as being illegal,
/// but when we do that, legalize produces these when it expands calls, then
/// expands these in the same legalize pass. We would like dag combine to be
/// able to hack on these between the call expansion and the node legalization.
/// As such this pass basically does "really late" legalization of these inline
/// with the X86 isel pass.
void X86DAGToDAGISel::PreprocessForFPConvert() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    MVT SrcVT = N->getOperand(0).getValueType();
    MVT DstVT = N->getValueType(0);
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
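    // For example, an x87 f80 -> SSE f32 FP_ROUND is handled below as an f32
    // truncating store of the f80 value into a stack temporary, followed by an
    // f32 load of that slot, which produces the result in an SSE register.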
    MVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(),
                                          N->getOperand(0),
                                          MemTmp, NULL, 0, MemVT);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, DstVT, Store, MemTmp,
                                        NULL, 0, MemVT);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
/// InstructionSelect - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void X86DAGToDAGISel::InstructionSelect() {
  CurBB = BB;  // BB can change as result of isel.

  const Function *F = CurDAG->getMachineFunction().getFunction();
  OptForSize = !F->isDeclaration() &&
               F->hasFnAttr(Attribute::OptimizeForSize);

  // FIXME: This should only happen when not -fast.
  PreprocessForFPConvert();

  // Codegen the basic block.
  DOUT << "===== Instruction selection begins:\n";

  DOUT << "===== Instruction selection ends:\n";

  CurDAG->RemoveDeadNodes();
}
void X86DAGToDAGISel::InstructionSelectPostProcessing() {
  // If we are emitting FP stack code, scan the basic block to determine if this
  // block defines any FP values. If so, put an FP_REG_KILL instruction before
  // the terminator of the block.
  //
  // Note that FP stack instructions are used in all modes for long double,
  // so we always need to do this check.
  // Also note that it's possible for an FP stack register to be live across
  // an instruction that produces multiple basic blocks (SSE CMOV) so we
  // must check all the generated basic blocks.
  //
  // Scan all of the machine instructions in these MBBs, checking for FP
  // stores. (RFP32 and RFP64 will not exist in SSE mode, but RFP80 might.)
  MachineFunction::iterator MBBI = CurBB;
  MachineFunction::iterator EndMBB = BB; ++EndMBB;
  for (; MBBI != EndMBB; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;

    // If this block returns, ignore it. We don't want to insert an FP_REG_KILL
    // before the return.
    MachineBasicBlock::iterator EndI = MBB->end();
    if (EndI->getDesc().isReturn())

    bool ContainsFPCode = false;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         !ContainsFPCode && I != E; ++I) {
      if (I->getNumOperands() != 0 && I->getOperand(0).isRegister()) {
        const TargetRegisterClass *clas;
        for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
          if (I->getOperand(op).isRegister() && I->getOperand(op).isDef() &&
              TargetRegisterInfo::isVirtualRegister(I->getOperand(op).getReg()) &&
              ((clas = RegInfo->getRegClass(I->getOperand(0).getReg())) ==
                 X86::RFP32RegisterClass ||
               clas == X86::RFP64RegisterClass ||
               clas == X86::RFP80RegisterClass)) {
            ContainsFPCode = true;

    // Check PHI nodes in successor blocks. These PHI's will be lowered to have
    // a copy of the input value in this block. In SSE mode, we only care about
    if (!ContainsFPCode) {
      // Final check, check LLVM BB's that are successors to the LLVM BB
      // corresponding to BB for FP PHI nodes.
      const BasicBlock *LLVMBB = BB->getBasicBlock();
      for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
           !ContainsFPCode && SI != E; ++SI) {
        for (BasicBlock::const_iterator II = SI->begin();
             (PN = dyn_cast<PHINode>(II)); ++II) {
          if (PN->getType() == Type::X86_FP80Ty ||
              (!Subtarget->hasSSE1() && PN->getType()->isFloatingPoint()) ||
              (!Subtarget->hasSSE2() && PN->getType() == Type::DoubleTy)) {
            ContainsFPCode = true;

    // Finally, if we found any FP code, emit the FP_REG_KILL instruction.
    if (ContainsFPCode) {
      BuildMI(*MBB, MBBI->getFirstTerminator(),
              TM.getInstrInfo()->get(X86::FP_REG_KILL));
/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing())
    BuildMI(BB, TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
}

void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
  // If this is main, emit special code for main.
  MachineBasicBlock *BB = MF.begin();
  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}
/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// x86.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
                                   bool isRoot, unsigned Depth) {
  DOUT << "MatchAddress: "; DEBUG(AM.dump());
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM, isRoot, Depth);
  // RIP relative addressing: %rip + 32-bit displacement!
  if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
    int64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (isInt32(AM.Disp + Val)) {

  int id = N.getNode()->getNodeId();
  bool AlreadySelected = isSelected(id);  // Already selected, not yet replaced.

  switch (N.getOpcode()) {
  case ISD::Constant: {
    int64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (isInt32(AM.Disp + Val)) {

  case X86ISD::Wrapper: {
    DOUT << "Wrapper: 64bit " << Subtarget->is64Bit();
    DOUT << " AM "; DEBUG(AM.dump()); DOUT << "\n";
    DOUT << "AlreadySelected " << AlreadySelected << "\n";
    bool is64Bit = Subtarget->is64Bit();
    // Under X86-64 non-small code model, GV (and friends) are 64-bits.
    // Also, base and index reg must be 0 in order to use rip as base.
    if (is64Bit && (TM.getCodeModel() != CodeModel::Small ||
                    AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
    if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
    // If the value is already available in a register and both the base and
    // index components have been picked, we can't also fit it into the
    // addressing mode; duplicate the GlobalAddress or ConstantPool as a
    // displacement instead.
    if (!AlreadySelected || (AM.Base.Reg.getNode() && AM.IndexReg.getNode())) {
      SDValue N0 = N.getOperand(0);
      if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
        GlobalValue *GV = G->getGlobal();
        AM.Disp += G->getOffset();
        AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
      } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
        AM.CP = CP->getConstVal();
        AM.Align = CP->getAlignment();
        AM.Disp += CP->getOffset();
        AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
      } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
        AM.ES = S->getSymbol();
        AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
      } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
        AM.JT = J->getIndex();
        AM.isRIPRel = TM.symbolicAddressesAreRIPRel();

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase
        && AM.Base.Reg.getNode() == 0) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();

    if (AlreadySelected || AM.IndexReg.getNode() != 0
        || AM.Scale != 1 || AM.isRIPRel)

        *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      if (Val == 1 || Val == 2 || Val == 3) {
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (ShVal.getNode()->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
            isa<ConstantSDNode>(ShVal.getNode()->getOperand(1))) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = AM.Disp + (AddVal->getZExtValue() << Val);

    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // X*[3,5,9] -> X+X*[2,4,8]
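    // e.g. X*9 is matched as Base = X, Index = X, Scale = 8, which an LEA can
    // compute as X + 8*X in a single instruction.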
    if (!AlreadySelected &&
        AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base.Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0 &&
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
      if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
          CN->getZExtValue() == 9) {
        AM.Scale = unsigned(CN->getZExtValue())-1;

        SDValue MulVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
            isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
          Reg = MulVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
          uint64_t Disp = AM.Disp + AddVal->getZExtValue() *
          Reg = N.getNode()->getOperand(0);
          Reg = N.getNode()->getOperand(0);

        AM.IndexReg = AM.Base.Reg = Reg;

    if (!AlreadySelected) {
      X86ISelAddressMode Backup = AM;
      if (!MatchAddress(N.getNode()->getOperand(0), AM, false, Depth+1) &&
          !MatchAddress(N.getNode()->getOperand(1), AM, false, Depth+1))

      if (!MatchAddress(N.getNode()->getOperand(1), AM, false, Depth+1) &&
          !MatchAddress(N.getNode()->getOperand(0), AM, false, Depth+1))

    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
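    // e.g. in (or (shl X, 3), 5) the low three bits of the shifted value are
    // known to be zero, so OR-ing in 5 is equivalent to adding 5 to the address.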
    if (AlreadySelected) break;

    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      X86ISelAddressMode Backup = AM;
      // Start with the LHS as an addr mode.
      if (!MatchAddress(N.getOperand(0), AM, false) &&
          // Address could not have picked a GV address for the displacement.
          // On x86-64, the resultant disp must fit in 32-bits.
          isInt32(AM.Disp + CN->getSExtValue()) &&
          // Check to see if the LHS & C is zero.
          CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
        AM.Disp += CN->getZExtValue();

    // Handle "(x << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
    // allows us to fold the shift into this addressing mode.
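    // e.g. (and (shl X, 2), 0xFC) is rewritten as (shl (and X, 0x3F), 2), so
    // the masked value becomes the index register and the shift becomes
    // Scale = 4.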
    if (AlreadySelected) break;
    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SHL) break;

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    // Not when RIP is used as the base.
    if (AM.isRIPRel) break;

    ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
    if (!C1 || !C2) break;
    // Not likely to be profitable if either the AND or SHIFT node has more
    // than one use (unless all uses are for address computation). Besides,
    // the isel mechanism requires their node IDs to be reused.
    if (!N.hasOneUse() || !Shift.hasOneUse())

    // Verify that the shift amount is something we can fold.
    unsigned ShiftCst = C1->getZExtValue();
    if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)

    // Get the new AND mask, this folds to a constant.
    SDValue NewANDMask = CurDAG->getNode(ISD::SRL, N.getValueType(),
                                         SDValue(C2, 0), SDValue(C1, 0));
    SDValue NewAND = CurDAG->getNode(ISD::AND, N.getValueType(),
                                     Shift.getOperand(0), NewANDMask);
    NewANDMask.getNode()->setNodeId(Shift.getNode()->getNodeId());
    NewAND.getNode()->setNodeId(N.getNode()->getNodeId());

    AM.Scale = 1 << ShiftCst;
    AM.IndexReg = NewAND;

  return MatchAddressBase(N, AM, isRoot, Depth);
/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM,
                                       bool isRoot, unsigned Depth) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0 && !AM.isRIPRel) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base.Reg = N;
  return false;
}
/// SelectAddr - Returns true if it is able to pattern match an addressing
/// mode. It returns the operands which make up the maximal addressing mode it
/// can match by reference.
bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp) {
  X86ISelAddressMode AM;
  if (MatchAddress(N, AM))
    return false;

  MVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base.Reg.getNode())
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp);
  return true;
}
/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDValue Op, SDValue Pred,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &InChain,
                                          SDValue &OutChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    InChain = N.getOperand(0).getValue(1);
    if (ISD::isNON_EXTLoad(InChain.getNode()) &&
        InChain.getValue(0).hasOneUse() &&
        N.hasOneUse() &&
        CanBeFoldedBy(N.getNode(), Pred.getNode(), Op.getNode())) {
      LoadSDNode *LD = cast<LoadSDNode>(InChain);
      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
        return false;
      OutChain = LD->getChain();
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse()) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
      return false;
    OutChain = LD->getChain();
    InChain = SDValue(LD, 1);
    return true;
  }
  return false;
}
/// SelectLEAAddr - It calls SelectAddr and determines if the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue Op, SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp) {
  X86ISelAddressMode AM;
  if (MatchAddress(N, AM))
    return false;

  MVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (AM.Base.Reg.getNode())
      Complexity = 1;
    else
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  } else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or
  // with a shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  if (Complexity > 2) {
    getAddressOperands(AM, Base, Scale, Index, Disp);
    return true;
  }
  return false;
}
bool X86DAGToDAGISel::TryFoldLoad(SDValue P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp) {
  if (ISD::isNON_EXTLoad(N.getNode()) &&
      N.hasOneUse() &&
      CanBeFoldedBy(N.getNode(), P.getNode(), P.getNode()))
    return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp);
  return false;
}
/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  MachineFunction *MF = CurBB->getParent();
  unsigned GlobalBaseReg = TM.getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}

static SDNode *FindCallStartFromCall(SDNode *Node) {
  if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
  assert(Node->getOperand(0).getValueType() == MVT::Other &&
         "Node doesn't have a token chain argument!");
  return FindCallStartFromCall(Node->getOperand(0).getNode());
}
/// getTruncateTo8Bit - return an SDNode that implements a subreg based
/// truncate of the specified operand to i8. This can be done with tablegen,
/// except that this code uses MVT::Flag in a tricky way that happens to
/// improve scheduling in some cases.
SDNode *X86DAGToDAGISel::getTruncateTo8Bit(SDValue N0) {
  assert(!Subtarget->is64Bit() &&
         "getTruncateTo8Bit is only needed on x86-32!");
  SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32);  // SubRegSet 1
  unsigned Opc;

  // Ensure that the source register has an 8-bit subreg on 32-bit targets
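  // (i.e. force the value into EAX/EBX/ECX/EDX; on x86-32 those are the only
  // general-purpose registers whose low byte is directly addressable.)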
  MVT N0VT = N0.getValueType();
  switch (N0VT.getSimpleVT()) {
  default: assert(0 && "Unknown truncate!");
  case MVT::i16:
    Opc = X86::MOV16to16_;
    break;
  case MVT::i32:
    Opc = X86::MOV32to32_;
    break;
  }

  // The use of MVT::Flag here is not strictly accurate, but it helps
  // scheduling in some cases.
  N0 = SDValue(CurDAG->getTargetNode(Opc, N0VT, MVT::Flag, N0), 0);
  return CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
                               MVT::i8, N0, SRIdx, N0.getValue(1));
}
SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3;
  if (!SelectAddr(In1, In1, Tmp0, Tmp1, Tmp2, Tmp3))
    return NULL;
  AddToISelQueue(Tmp0);
  AddToISelQueue(Tmp1);
  AddToISelQueue(Tmp2);
  AddToISelQueue(Tmp3);
  AddToISelQueue(In2L);
  AddToISelQueue(In2H);
  AddToISelQueue(Chain);
  SDValue LSI = CurDAG->getMemOperand(cast<MemSDNode>(In1)->getMemOperand());
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, In2L, In2H, LSI, Chain };
  return CurDAG->getTargetNode(Opc, MVT::i32, MVT::i32, MVT::Other, Ops, 8);
}
SDNode *X86DAGToDAGISel::Select(SDValue N) {
  SDNode *Node = N.getNode();
  MVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();

  DOUT << std::string(Indent, ' ') << "Selecting: ";
  DEBUG(Node->dump(CurDAG));

  if (Node->isMachineOpcode()) {
    DOUT << std::string(Indent-2, ' ') << "== ";
    DEBUG(Node->dump(CurDAG));
    return NULL;   // Already selected.

  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();
  case ISD::ADD: {
    // Turn ADD X, c to MOV32ri X+c. This cannot be done with tblgen'd
    // code and is matched first so as to prevent it from being turned into
    // LEA32r X+c.
    // In 64-bit small code size mode, use LEA to take advantage of
    // RIP-relative addressing.
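    // e.g. (add (X86ISD::Wrapper tglobaladdr:X), 16) is selected below as a
    // single MOV32ri of "X+16" (or, on x86-64, as an LEA64r of the
    // RIP-relative address).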
    if (TM.getCodeModel() != CodeModel::Small)

    MVT PtrVT = TLI.getPointerTy();
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    if (N.getNode()->getValueType(0) == PtrVT &&
        N0.getOpcode() == X86ISD::Wrapper &&
        N1.getOpcode() == ISD::Constant) {
      unsigned Offset = (unsigned)cast<ConstantSDNode>(N1)->getZExtValue();

      // TODO: handle ExternalSymbolSDNode.
      if (GlobalAddressSDNode *G =
            dyn_cast<GlobalAddressSDNode>(N0.getOperand(0))) {
        C = CurDAG->getTargetGlobalAddress(G->getGlobal(), PtrVT,
                                           G->getOffset() + Offset);
      } else if (ConstantPoolSDNode *CP =
                   dyn_cast<ConstantPoolSDNode>(N0.getOperand(0))) {
        C = CurDAG->getTargetConstantPool(CP->getConstVal(), PtrVT,
                                          CP->getOffset() + Offset);

        if (Subtarget->is64Bit()) {
          SDValue Ops[] = { CurDAG->getRegister(0, PtrVT), getI8Imm(1),
                            CurDAG->getRegister(0, PtrVT), C };
          return CurDAG->SelectNodeTo(N.getNode(), X86::LEA64r,

          return CurDAG->SelectNodeTo(N.getNode(), X86::MOV32ri, PtrVT, C);

    // Other cases are handled by auto-generated code.
  case X86ISD::ATOMOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMOR6432);
  case X86ISD::ATOMXOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMXOR6432);
  case X86ISD::ATOMADD64_DAG:
    return SelectAtomic64(Node, X86::ATOMADD6432);
  case X86ISD::ATOMSUB64_DAG:
    return SelectAtomic64(Node, X86::ATOMSUB6432);
  case X86ISD::ATOMNAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMNAND6432);
  case X86ISD::ATOMAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMAND6432);
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned)
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    else
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }

    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT()) {
    default: assert(0 && "Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
    // multiply is commutative
    foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3);
    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), LoReg,
                                          N0, SDValue()).getValue(1);

      AddToISelQueue(N1.getOperand(0));
      AddToISelQueue(Tmp0);
      AddToISelQueue(Tmp1);
      AddToISelQueue(Tmp2);
      AddToISelQueue(Tmp3);
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
        CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));

        SDValue(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);

    // Copy the low half of the result, if it is needed.
    if (!N.getValue(0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(N.getValue(0), Result);
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));

    // Copy the high half of the result, if it is needed.
    if (!N.getValue(1).use_empty()) {
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
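        // (AH cannot be encoded in an instruction that carries a REX prefix,
        // which these 64-bit register copies may require.)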
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
                                      CurDAG->getTargetConstant(8, MVT::i8)), 0);
        // Then truncate it down to i8.
        SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32);  // SubRegSet 1
        Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
                                               MVT::i8, Result, SRIdx), 0);

        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);

      ReplaceUses(N.getValue(1), Result);
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned)
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    else
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }

    unsigned LoReg, HiReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT()) {
    default: assert(0 && "Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  HiReg = X86::AH;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrOpcode = X86::MOV16r0;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; HiReg = X86::EDX;
      ClrOpcode = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; HiReg = X86::RDX;
      ClrOpcode = X86::MOV64r0;
      SExtOpcode = X86::CQO;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);

    if (NVT == MVT::i8 && !isSigned) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
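      // DIV8r divides the 16-bit value in AX, leaving the quotient in AL and
      // the remainder in AH, so zero-extending the 8-bit dividend into AX is
      // all the setup an unsigned 8-bit divide needs.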
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Move, Chain;
      if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N0.getOperand(0) };
        AddToISelQueue(N0.getOperand(0));
        AddToISelQueue(Tmp0);
        AddToISelQueue(Tmp1);
        AddToISelQueue(Tmp2);
        AddToISelQueue(Tmp3);
          SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, MVT::i16, MVT::Other,
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);

          SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, MVT::i16, N0), 0);
        Chain = CurDAG->getEntryNode();

      Chain  = CurDAG->getCopyToReg(Chain, X86::AX, Move, SDValue());
      InFlag = Chain.getValue(1);

        CurDAG->getCopyToReg(CurDAG->getEntryNode(),
                             LoReg, N0, SDValue()).getValue(1);

      // Sign extend the low part into the high part.
        SDValue(CurDAG->getTargetNode(SExtOpcode, MVT::Flag, InFlag), 0);

      // Zero out the high part, effectively zero extending the input.
      SDValue ClrNode = SDValue(CurDAG->getTargetNode(ClrOpcode, NVT), 0);
      InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), HiReg,
                                    ClrNode, InFlag).getValue(1);

      AddToISelQueue(N1.getOperand(0));
      AddToISelQueue(Tmp0);
      AddToISelQueue(Tmp1);
      AddToISelQueue(Tmp2);
      AddToISelQueue(Tmp3);
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
        CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));

        SDValue(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
    // Copy the division (low) result, if it is needed.
    if (!N.getValue(0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(N.getValue(0), Result);
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));

    // Copy the remainder (high) result, if it is needed.
    if (!N.getValue(1).use_empty()) {
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
                                      CurDAG->getTargetConstant(8, MVT::i8)), 0);
        // Then truncate it down to i8.
        SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32);  // SubRegSet 1
        Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
                                               MVT::i8, Result, SRIdx), 0);

        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);

      ReplaceUses(N.getValue(1), Result);
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
  case ISD::SIGN_EXTEND_INREG: {
    MVT SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
    if (SVT == MVT::i8 && !Subtarget->is64Bit()) {
      SDValue N0 = Node->getOperand(0);
      SDValue TruncOp = SDValue(getTruncateTo8Bit(N0), 0);

      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unknown sign_extend_inreg!");
      case MVT::i16:
        Opc = X86::MOVSX16rr8;
        break;
      case MVT::i32:
        Opc = X86::MOVSX32rr8;
        break;
      }

      SDNode *ResNode = CurDAG->getTargetNode(Opc, NVT, TruncOp);

      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(TruncOp.getNode()->dump(CurDAG));
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(ResNode->dump(CurDAG));
  case ISD::TRUNCATE: {
    if (NVT == MVT::i8 && !Subtarget->is64Bit()) {
      SDValue Input = Node->getOperand(0);
      AddToISelQueue(Node->getOperand(0));
      SDNode *ResNode = getTruncateTo8Bit(Input);

      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(ResNode->dump(CurDAG));
  case ISD::DECLARE: {
    // Handle DECLARE nodes here because the second operand may have been
    // wrapped in X86ISD::Wrapper.
    SDValue Chain = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
    SDValue N2 = Node->getOperand(2);
    if (!isa<FrameIndexSDNode>(N1))

    int FI = cast<FrameIndexSDNode>(N1)->getIndex();
    if (N2.getOpcode() == ISD::ADD &&
        N2.getOperand(0).getOpcode() == X86ISD::GlobalBaseReg)
      N2 = N2.getOperand(1);
    if (N2.getOpcode() == X86ISD::Wrapper &&
        isa<GlobalAddressSDNode>(N2.getOperand(0))) {
        cast<GlobalAddressSDNode>(N2.getOperand(0))->getGlobal();
      SDValue Tmp1 = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
      AddToISelQueue(Chain);
      SDValue Ops[] = { Tmp1, Tmp2, Chain };
      return CurDAG->getTargetNode(TargetInstrInfo::DECLARE,
                                   MVT::Other, Ops, 3);
  SDNode *ResNode = SelectCode(N);

  DOUT << std::string(Indent-2, ' ') << "=> ";
  if (ResNode == NULL || ResNode == N.getNode())
    DEBUG(N.getNode()->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  AddToISelQueue(Op0);
  AddToISelQueue(Op1);
  AddToISelQueue(Op2);
  AddToISelQueue(Op3);
  return false;
}
/// createX86ISelDag - This pass converts a legalized DAG into an
/// X86-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, bool Fast) {
  return new X86DAGToDAGISel(TM, Fast);
}