//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
#define LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
namespace llvm {
  class X86Subtarget;
  class X86TargetMachine;

  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,
      /// BSF - Bit scan forward.
      /// BSR - Bit scan reverse.

      /// SHLD, SHRD - Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.

      /// FAND - Bitwise logical AND of floating point values. This corresponds
      /// to X86::ANDPS or X86::ANDPD.

      /// FOR - Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.

      /// FXOR - Bitwise logical XOR of floating point values. This corresponds
      /// to X86::XORPS or X86::XORPD.

      /// FANDN - Bitwise logical ANDNOT of floating point values. This
      /// corresponds to X86::ANDNPS or X86::ANDNPD.
      /// FSRL - Bitwise logical right shift of floating point values. This
      /// corresponds to X86::PSRLDQ.
      /// CALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular the
      /// operands of these nodes are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
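      //
      // Illustrative sketch (an assumption, not part of the original header):
      // call lowering code typically materializes this node along the lines of
      //   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
      //   Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
      // where Ops begins with the incoming chain and callee and is followed
      // by the register/argument operands described above.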
      /// RDTSC_DAG - This operation implements the lowering for
      /// readcyclecounter.

      /// X86 Read Time-Stamp Counter and Processor ID.

      /// X86 Read Performance Monitoring Counters.
      /// X86 compare and logical compare instructions.

      /// X86 bit-test instructions.

      /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
      /// operand, usually produced by a CMP instruction.
      // Same as SETCC except it's materialized with a sbb and the value is all
      // ones or all zeros.
      SETCC_CARRY, // R = carry_bit ? ~0 : 0
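      // Illustrative note (not from the original header): this is typically
      // selected to "sbb %reg, %reg", which computes reg - reg - CF, yielding
      // ~0 when the carry flag is set and 0 otherwise.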
      /// X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
      /// Operands are two FP values to compare; result is a mask of
      /// 0s or 1s. Generally does the right thing for C/C++ with NaNs.
      /// X86 MOVMSK{pd|ps}, extracts sign bits of two or four FP values,
      /// result in an integer GPR. Needs masking for scalar result.
      /// X86 conditional moves. Operand 0 and operand 1 are the two values
      /// to select from. Operand 2 is the condition code, and operand 3 is the
      /// flag operand produced by a CMP or TEST instruction. It also writes a
      /// flag result.
      /// X86 conditional branches. Operand 0 is the chain operand, operand 1
      /// is the block to branch to if the condition is true, operand 2 is the
      /// condition code, and operand 3 is the flag operand produced by a CMP
      /// or TEST instruction.
      /// Return with a flag operand. Operand 0 is the chain operand, operand
      /// 1 is the number of bytes of stack to pop.

      /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.

      /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.

      /// GlobalBaseReg - On Darwin, this node represents the result of the popl
      /// at function entry, used for PIC code.

      /// Wrapper - A wrapper node for TargetConstantPool,
      /// TargetExternalSymbol, and TargetGlobalAddress.

      /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      /// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector
      /// to an MMX vector. If you think this is too close to the previous
      /// mnemonic, so do I; blame Intel.

      /// MMX_MOVD2W - Copies a 32-bit value from the low word of a MMX
      /// vector to a GPR.
      /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.

      /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.

      /// INSERTPS - Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.
      /// PINSRB - Insert the lower 8-bits of a 32-bit value to a vector,
      /// corresponds to X86::PINSRB.

      /// PINSRW - Insert the lower 16-bits of a 32-bit value to a vector,
      /// corresponds to X86::PINSRW.

      /// PSHUFB - Shuffle 16 8-bit values within a vector.

      /// ANDNP - Bitwise Logical AND NOT of Packed FP values.

      /// PSIGN - Copy integer sign.

      /// BLENDI - Blend where the selector is an immediate.

      /// ADDSUB - Combined add and sub on an FP vector.
      // SUBUS - Integer sub with unsigned saturation.

      /// HADD - Integer horizontal add.

      /// HSUB - Integer horizontal sub.

      /// FHADD - Floating point horizontal add.

      /// FHSUB - Floating point horizontal sub.

      /// UMAX, UMIN - Unsigned integer max and min.

      /// SMAX, SMIN - Signed integer max and min.

      /// FMAX, FMIN - Floating point max and min.

      /// FMAXC, FMINC - Commutative FMIN and FMAX.

      /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
      /// approximation. Note that these typically require refinement
      /// in order to obtain suitable precision.
      // TLSADDR - Thread Local Storage.

      // TLSBASEADDR - Thread Local Storage. A call to get the start address
      // of the TLS block for the current module.

      // TLSCALL - Thread Local Storage. When calling to an OS provided
      // thunk at the address from an earlier relocation.

      // EH_RETURN - Exception Handling helpers.

      // EH_SJLJ_SETJMP - SjLj exception handling setjmp.

      // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.

      /// TC_RETURN - Tail call return. See X86TargetLowering::LowerCall for
      /// the list of operands.
      // VZEXT_MOVL - Vector move to low scalar and zero higher vector elements.

      // VZEXT - Vector integer zero-extend.

      // VSEXT - Vector integer signed-extend.

      // VTRUNC - Vector integer truncate.

      // VTRUNCM - Vector integer truncate with mask.

      // VFPEXT - Vector FP extend.

      // VFPROUND - Vector FP round.
      // VSHLDQ, VSRLDQ - 128-bit vector logical left / right shift.

      // VSHL, VSRL, VSRA - Vector shift elements.

      // VSHLI, VSRLI, VSRAI - Vector shift elements by immediate.

      // CMPP - Vector packed double/float comparison.

      // PCMP* - Vector integer comparisons.

      // PCMP*M - Vector integer comparisons, the result is in a mask vector.

      /// CMPM, CMPMU - Vector comparison generating mask bits for fp and
      /// integer signed and unsigned data types.
      // ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
      ADD, SUB, ADC, SBB, SMUL,
      INC, DEC, OR, XOR, AND,

      BEXTR, // BEXTR - Bit field extract

      UMUL,  // LOW, HI, FLAGS = umul LHS, RHS

      // 8-bit SMUL/UMUL - AX, FLAGS = smul8/umul8 AL, RHS
      // MUL_IMM - X86 specific multiply by immediate.

      // PTEST - Vector bitwise comparisons.

      // TESTP - Vector packed fp sign bitwise comparisons.

      // TESTM, TESTNM - Vector "test" in AVX-512, the result is in a mask vector.

      // OR/AND test for masks.

      // Several flavors of instructions with vector shuffle behaviors.

      // AVX512 inter-lane alignr.

      // Insert/Extract vector element.

      // Vector multiply packed unsigned doubleword integers.

      // Vector multiply packed signed doubleword integers.
      // Save xmm argument registers to the stack, according to %al. An operator
      // is needed so that this can be expanded with control flow.
      VASTART_SAVE_XMM_REGS,
      // Windows's _chkstk call to do stack probing.

      // For allocating variable amounts of stack space when using
      // segmented stacks. Checks if the current stacklet has enough space, and
      // falls back to heap allocation if not.

      // Windows's _ftol2 runtime routine to do fptoui.

      // Store FP status word into i16 register.

      // Store contents of %ah into %eflags.

      // Get a random integer and indicate whether it is valid in CF.

      // Get a NIST SP800-90B & C compliant random integer and
      // indicate whether it is valid in CF.

      // Test if in transactional execution.
      LCMPXCHG_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,

      // Load, scalar_to_vector, and zero extend.

      // Store FP control word into i16 memory.
      /// This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source. This corresponds
      /// to the X86::FIST*m instructions and the accompanying rounding-mode
      /// changes. It has two inputs (token chain and address) and two outputs
      /// (int value and token chain).

      /// This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result. This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain). FILD_FLAG
      /// also produces a flag.
      /// This instruction implements an extending load to FP stack slots.
      /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
      /// operand, ptr to load from, and a ValueType node indicating the type
      /// to load to.

      /// This instruction implements a truncating store to FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
      /// chain operand, value to store, address, and a ValueType to store it
      /// as.

      /// This instruction grabs the address of the next argument
      /// from a va_list. (reads and modifies the va_list in memory)
      // WARNING: Do not add anything in the end unless you want the node to
      // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
      // treated as target memory ops!
    };
  }
  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF128, VEXTRACTI128 instructions.
    bool isVEXTRACT128Index(SDNode *N);

    /// Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF128, VINSERTI128 instructions.
    bool isVINSERT128Index(SDNode *N);

    /// Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF64X4, VEXTRACTI64X4 instructions.
    bool isVEXTRACT256Index(SDNode *N);

    /// Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF64X4, VINSERTI64X4 instructions.
    bool isVINSERT256Index(SDNode *N);
    /// Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF128, VEXTRACTI128 instructions.
    unsigned getExtractVEXTRACT128Immediate(SDNode *N);
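    /// For example (illustrative, not from the original header): extracting
    /// elements 4-7 of a v8f32 as a v4f32 subvector selects the upper 128-bit
    /// lane, so the returned immediate is 1.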
    /// Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF128, VINSERTI128 instructions.
    unsigned getInsertVINSERT128Immediate(SDNode *N);

    /// Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF64X4, VEXTRACTI64X4 instructions.
    unsigned getExtractVEXTRACT256Immediate(SDNode *N);

    /// Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF64X4, VINSERTI64X4 instructions.
    unsigned getInsertVINSERT256Immediate(SDNode *N);
    /// Returns true if Elt is a constant zero or floating point constant +0.0.
    bool isZeroNode(SDValue Elt);
    /// Returns true if the given offset
    /// fits into the displacement field of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement = true);
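    /// For example (illustrative): under the x86-64 small code model the
    /// offset, combined with any symbolic displacement, must still fit in the
    /// instruction's signed 32-bit displacement field.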
    /// Determines whether the callee is required to pop its
    /// own arguments. Callee pop is necessary to support tail calls.
    bool isCalleePop(CallingConv::ID CallingConv,
                     bool is64Bit, bool IsVarArg, bool TailCallOpt);
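    /// For example (illustrative): a non-vararg X86_StdCall callee pops its
    /// own arguments, so
    ///   isCalleePop(CallingConv::X86_StdCall, /*is64Bit=*/false,
    ///               /*IsVarArg=*/false, /*TailCallOpt=*/false)
    /// returns true.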
    /// AVX512 static rounding constants. These need to match the values
    /// expected by the AVX-512 rounding-mode intrinsics.
    enum STATIC_ROUNDING {
      TO_NEAREST_INT = 0,
      TO_NEG_INF = 1,
      TO_POS_INF = 2,
      TO_ZERO = 3,
      CUR_DIRECTION = 4
    };
  }
  //===--------------------------------------------------------------------===//
  //  X86 Implementation of the TargetLowering interface
  class X86TargetLowering final : public TargetLowering {
  public:
    explicit X86TargetLowering(const X86TargetMachine &TM);
    unsigned getJumpTableEncoding() const override;

    MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i8; }
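    // Descriptive note (added): x86 shift counts live in CL and the hardware
    // masks them to the operand width, so an i8 shift-amount type suffices.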
    const MCExpr *
    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                              const MachineBasicBlock *MBB, unsigned uid,
                              MCContext &Ctx) const override;
    /// Returns relocation base for the given PIC jumptable.
    SDValue getPICJumpTableRelocBase(SDValue Table,
                                     SelectionDAG &DAG) const override;
    const MCExpr *
    getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                 unsigned JTI, MCContext &Ctx) const override;
    /// Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. For X86, aggregates
    /// that contain SSE vectors are placed at 16-byte boundaries while the
    /// rest are at 4-byte boundaries.
    unsigned getByValTypeAlignment(Type *Ty) const override;
    /// Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, that means it's safe to assume the
    /// destination alignment can satisfy any constraint. Similarly, if
    /// SrcAlign is zero it means there isn't a need to check it against the
    /// alignment requirement, probably because the source does not need to
    /// be loaded. If 'IsMemset' is true, that means it's expanding a memset.
    /// If 'ZeroMemset' is true, that means it's a memset of zero.
    /// 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
    /// does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                            bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                            MachineFunction &MF) const override;
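    // Illustrative note (an assumption about typical behavior, not a
    // contract): on SSE2-capable subtargets this hook tends to pick a 128-bit
    // vector type such as MVT::v4i32 for large, suitably aligned expansions,
    // falling back to MVT::i64 or MVT::i32 otherwise.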
    /// Returns true if it's safe to use load / store of the
    /// specified type to expand memcpy / memset inline. This is mostly true
    /// for all types except for some special cases. For example, on X86
    /// targets without SSE2 f64 load / store are done with fldl / fstpl which
    /// also does type conversion. Note the specified type doesn't have to be
    /// legal as the hook is used before type legalization.
    bool isSafeMemOpType(MVT VT) const override;
    /// Returns true if the target allows unaligned memory accesses of the
    /// specified type. Returns whether it is "fast" by reference in the
    /// second argument.
    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align,
                                        bool *Fast) const override;
    /// Provide custom lowering hooks for some operations.
    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
    /// Replace the results of a node with an illegal result
    /// type with new values built out of custom code.
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;
    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
    /// Return true if the target has native support for
    /// the specified value type and it is 'desirable' to use the type for the
    /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
    /// instruction encodings are longer and some i16 instructions are slow.
    bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;

    /// Return true if the target has native support for the
    /// specified value type and it is 'desirable' to use the type. e.g. On x86
    /// i16 is legal, but undesirable since i16 instruction encodings are longer
    /// and some i16 instructions are slow.
    bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const override;
    MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr *MI,
                                MachineBasicBlock *MBB) const override;
    /// This method returns the name of a target specific DAG node.
    const char *getTargetNodeName(unsigned Opcode) const override;

    /// Return the value type to use for ISD::SETCC.
    EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
    /// Determine which of the bits specified in Mask are known to be either
    /// zero or one and return them in the KnownZero/KnownOne bitsets.
    void computeKnownBitsForTargetNode(const SDValue Op,
                                       APInt &KnownZero,
                                       APInt &KnownOne,
                                       const SelectionDAG &DAG,
                                       unsigned Depth = 0) const override;
    /// Determine the number of bits in the operation that are sign bits.
    unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                             const SelectionDAG &DAG,
                                             unsigned Depth) const override;

    bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA,
                        int64_t &Offset) const override;

    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;

    bool ExpandInlineAsm(CallInst *CI) const override;
    ConstraintType
    getConstraintType(const std::string &Constraint) const override;
    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight
    getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                   const char *constraint) const override;
    const char *LowerXConstraint(EVT ConstraintVT) const override;
    /// Lower the specified operand into the Ops vector. If it is invalid, don't
    /// add anything to Ops. If hasMemory is true it means one of the asm
    /// constraints of the inline asm instruction being processed is 'm'.
    void LowerAsmOperandForConstraint(SDValue Op,
                                      std::string &Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;
    /// Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for the
    /// register. This should only be used for C_Register constraints. On
    /// error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 MVT VT) const override;
    /// Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;

    /// Return true if the specified immediate is a legal
    /// icmp immediate, that is the target has icmp instructions which can
    /// compare a register against the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    /// Return true if the specified immediate is a legal
    /// add immediate, that is the target has add instructions which can
    /// add a register and the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalAddImmediate(int64_t Imm) const override;
    /// \brief Return the cost of the scaling factor used in the addressing
    /// mode represented by AM for this target, for a load/store
    /// of the specified type.
    /// If the AM is supported, the return value must be >= 0.
    /// If the AM is not supported, it returns a negative value.
    int getScalingFactorCost(const AddrMode &AM, Type *Ty) const override;

    bool isVectorShiftByScalarCheap(Type *Ty) const override;
    /// Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
    /// register EAX to i16 by referencing its sub-register AX.
    bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
    bool isTruncateFree(EVT VT1, EVT VT2) const override;

    bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
    /// Return true if any actual instruction that defines a
    /// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
    /// register. This does not necessarily include registers defined in
    /// unknown ways, such as incoming arguments, or copies from unknown
    /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
    /// does not necessarily apply to truncate instructions. e.g. on x86-64,
    /// all instructions that define 32-bit values implicitly zero-extend the
    /// result out to 64 bits.
    bool isZExtFree(Type *Ty1, Type *Ty2) const override;
    bool isZExtFree(EVT VT1, EVT VT2) const override;
    bool isZExtFree(SDValue Val, EVT VT2) const override;
    /// Return true if an FMA operation is faster than a pair of fmul and fadd
    /// instructions. fmuladd intrinsics will be expanded to FMAs when this
    /// method returns true; otherwise fmuladd is expanded to fmul + fadd.
    bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
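    // Illustrative note (an assumption): on X86 this is expected to hold only
    // when the subtarget actually provides fused multiply-add instructions
    // (FMA3/FMA4); without them, fmuladd falls back to separate fmul and fadd.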
    /// Return true if it's profitable to narrow
    /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
    /// from i32 to i8 but not from i32 to i16.
    bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;

    /// Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
    /// Targets can use this to indicate that they only support *some*
    /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
    /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to
    /// be legal.
    bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
                            EVT VT) const override;
    /// Similar to isShuffleMaskLegal. Targets can use this to
    /// indicate if there is a suitable VECTOR_SHUFFLE that can be used to
    /// replace a VAND with a constant pool entry.
    bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                EVT VT) const override;
    /// If true, then instruction selection should
    /// seek to shrink the FP constant of the specified type to a smaller type
    /// in order to save space and / or reduce runtime.
    bool ShouldShrinkFPConstant(EVT VT) const override {
      // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
      // expensive than a straight movsd. On the other hand, it's important to
      // shrink long double fp constant since fldt is very slow.
      return !X86ScalarSSEf64 || VT == MVT::f80;
    }
    const X86Subtarget* getSubtarget() const {
      return Subtarget;
    }
    /// Return true if the specified scalar FP type is computed in an SSE
    /// register, not on the X87 floating point stack.
    bool isScalarFPTypeInSSEReg(EVT VT) const {
      return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
             (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
    }
    /// Return true if the target uses the MSVC _ftol2 routine for fptoui.
    bool isTargetFTOL() const;

    /// Return true if the MSVC _ftol2 routine should be used for fptoui to the
    /// given type.
    bool isIntegerTypeFTOL(EVT VT) const {
      return isTargetFTOL() && VT == MVT::i64;
    }
    /// \brief Returns true if it is beneficial to convert a load of a constant
    /// to just the constant itself.
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    /// Intel processors have a unified instruction and data cache, so there
    /// is no cache-clearing builtin to report.
    const char * getClearCacheBuiltinName() const override {
      return nullptr; // nothing to do, move along.
    }
    unsigned getRegisterByName(const char* RegName, EVT VT) const override;

    /// This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo) const override;

    /// Return true if the target stores stack protector cookies at a fixed
    /// offset in some non-standard address space, and populates the address
    /// space and offset as appropriate.
    bool getStackCookieLocation(unsigned &AddressSpace,
                                unsigned &Offset) const override;

    SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                      SelectionDAG &DAG) const;

    bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
    /// \brief Reset the operation actions based on target options.
    void resetOperationActions() override;

    bool useLoadStackGuardNode() const override;

    /// \brief Customize the preferred legalization strategy for certain types.
    LegalizeTypeAction getPreferredVectorAction(EVT VT) const override;

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(MVT VT) const override;
  private:
    /// Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;
    const DataLayout *TD;

    /// Used to store the TargetOptions so that we don't waste time resetting
    /// the operation actions unless we have to.
    TargetOptions TO;
    /// Select between SSE or x87 floating point ops.
    /// When SSE is available, use it for f32 operations.
    /// When SSE2 is available, use it for f64 operations.
    bool X86ScalarSSEf32;
    bool X86ScalarSSEf64;
    /// A list of legal FP immediates.
    std::vector<APFloat> LegalFPImmediates;

    /// Indicate that this x86 target can instruction
    /// select the specified FP immediate natively.
    void addLegalFPImmediate(const APFloat& Imm) {
      LegalFPImmediates.push_back(Imm);
    }
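    // For instance (illustrative, not from the original header), the target
    // constructor typically registers +0.0 and +1.0 here, since x87 can
    // materialize them directly with fldz/fld1.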
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            SDLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerMemArgument(SDValue Chain,
                             CallingConv::ID CallConv,
                             const SmallVectorImpl<ISD::InputArg> &ArgInfo,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA, MachineFrameInfo *MFI,
                             unsigned i) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    // Call lowering helpers.

    /// Check whether the call is eligible for tail call optimization. Targets
    /// that want to do tail call optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                           Type *RetTy,
                                           const SmallVectorImpl<ISD::OutputArg> &Outs,
                                           const SmallVectorImpl<SDValue> &OutVals,
                                           const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall, bool Is64Bit,
                                    int FPDiff, SDLoc dl) const;

    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                         SelectionDAG &DAG) const;
    std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                               bool IsSigned,
                                               bool isReplace) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const;
    SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
                               int64_t Offset, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToBT(SDValue And, ISD::CondCode CC,
                      SDLoc dl, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
    SDValue
    LowerFormalArguments(SDValue Chain,
                         CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         SDLoc dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const override;
    SDValue LowerCall(CallLoweringInfo &CLI,
                      SmallVectorImpl<SDValue> &InVals) const override;

    SDValue LowerReturn(SDValue Chain,
                        CallingConv::ID CallConv, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
                        SDLoc dl, SelectionDAG &DAG) const override;
    bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

    bool mayBeEmittedAsTailCall(CallInst *CI) const override;

    EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
                                 ISD::NodeType ExtendKind) const override;

    bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                        bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        LLVMContext &Context) const override;

    const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
    bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
    bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
    bool shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

    LoadInst *
    lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;

    bool needsCmpXchgNb(const Type *MemType) const;
    /// Utility function to emit atomic-load-arith operations (and, or, xor,
    /// nand, max, min, umax, umin). It takes the corresponding instruction to
    /// expand, the associated machine basic block, and the associated X86
    /// opcodes for reg/reg.
    MachineBasicBlock *EmitAtomicLoadArith(MachineInstr *MI,
                                           MachineBasicBlock *MBB) const;

    /// Utility function to emit atomic-load-arith operations (and, or, xor,
    /// nand, add, sub, swap) for 64-bit operands on 32-bit targets.
    MachineBasicBlock *EmitAtomicLoadArith6432(MachineInstr *MI,
                                               MachineBasicBlock *MBB) const;
    // Utility function to emit the low-level va_arg code for X86-64.
    MachineBasicBlock *EmitVAARG64WithCustomInserter(
                       MachineInstr *MI,
                       MachineBasicBlock *MBB) const;
    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
                                                   MachineInstr *BInstr,
                                                   MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
                                         MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr *MI,
                                        MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
                                         MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitFMA3Instr(MachineInstr *MI,
                                     MachineBasicBlock *MBB) const;
    /// Emit nodes that will be selected as "test Op0,Op0", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, SDLoc dl,
                     SelectionDAG &DAG) const;

    /// Emit nodes that will be selected as "cmp Op0,Op1", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, SDLoc dl,
                    SelectionDAG &DAG) const;

    /// Convert a comparison if required by the subtarget.
    SDValue ConvertCmpIfNecessary(SDValue Cmp, SelectionDAG &DAG) const;
  };
  namespace X86 {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  }
} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_X86ISELLOWERING_H