//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
// The LLVM Compiler Infrastructure
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//===----------------------------------------------------------------------===//

#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H

#include "X86Subtarget.h"
#include "X86RegisterInfo.h"
#include "X86MachineFunctionInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"

// X86 Specific DAG Nodes

// Start the numbering where the builtin ops leave off.
FIRST_NUMBER = ISD::BUILTIN_OP_END,
/// BSF - Bit scan forward.
/// BSR - Bit scan reverse.

/// SHLD, SHRD - Double shift instructions. These correspond to
/// X86::SHLDxx and X86::SHRDxx instructions.

/// FAND - Bitwise logical AND of floating point values. This corresponds
/// to X86::ANDPS or X86::ANDPD.

/// FOR - Bitwise logical OR of floating point values. This corresponds
/// to X86::ORPS or X86::ORPD.

/// FXOR - Bitwise logical XOR of floating point values. This corresponds
/// to X86::XORPS or X86::XORPD.
/// FSRL - Bitwise logical right shift of floating point values. This
/// corresponds to X86::PSRLDQ.
/// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
/// integer source in memory and FP reg result. This corresponds to the
/// X86::FILD*m instructions. It has three inputs (token chain, address,
/// and source type) and two outputs (FP value and token chain). FILD_FLAG
/// also produces a flag.
/// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
/// integer destination in memory and a FP reg source. This corresponds
/// to the X86::FIST*m instructions and the rounding mode change stuff. It
/// has two inputs (token chain and address) and two outputs (int value
/// and token chain).
/// FLD - This instruction implements an extending load to FP stack slots.
/// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
/// operand, a pointer to load from, and a ValueType node indicating the
/// type to load to.
/// FST - This instruction implements a truncating store to FP stack
/// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
/// chain operand, a value to store, an address, and a ValueType to store
/// it as.
/// CALL/TAILCALL - These operations represent an abstract X86 call
/// instruction, which includes a bunch of information. In particular the
/// operands of these nodes are:
///
///     #0 - The incoming token chain
///     #1 - The callee
///     #2 - The number of arg bytes the caller pushes on the stack.
///     #3 - The number of arg bytes the callee pops off the stack.
///     #4 - The value to pass in AL/AX/EAX (optional)
///     #5 - The value to pass in DL/DX/EDX (optional)
///
/// The result values of these nodes are:
///
///     #0 - The outgoing token chain
///     #1 - The first register result value (optional)
///     #2 - The second register result value (optional)
///
/// The CALL vs TAILCALL distinction boils down to whether the callee is
/// known not to modify the caller's stack frame, as is standard with
/// LLVM.
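///
/// For example (illustrative values only, not taken from this file): for a
/// cdecl call passing two i32 arguments on the stack, operand #2 would be 8
/// and operand #3 would be 0, since the caller cleans up; for a stdcall
/// callee, which pops its own arguments, operand #3 would be 8 instead.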
/// RDTSC_DAG - This operation implements the lowering for
/// readcyclecounter.
/// X86 compare and logical compare instructions.

/// X86 bit-test instructions.

/// X86 SetCC. Operand 0 is condition code, and operand 1 is the flag
/// operand produced by a CMP instruction.
/// X86 conditional moves. Operand 0 and operand 1 are the two values
/// to select from. Operand 2 is the condition code, and operand 3 is the
/// flag operand produced by a CMP or TEST instruction. It also writes a
/// flag result.
/// X86 conditional branches. Operand 0 is the chain operand, operand 1
/// is the block to branch to if the condition is true, operand 2 is the
/// condition code, and operand 3 is the flag operand produced by a CMP
/// or TEST instruction.
/// Return with a flag operand. Operand 0 is the chain operand, operand
/// 1 is the number of bytes of stack to pop.

/// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.

/// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.

/// GlobalBaseReg - On Darwin, this node represents the result of the popl
/// at function entry, used for PIC code.

/// Wrapper - A wrapper node for TargetConstantPool,
/// TargetExternalSymbol, and TargetGlobalAddress.

/// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
/// relative displacements.
/// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
/// i32, corresponds to X86::PEXTRB.

/// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
/// i32, corresponds to X86::PEXTRW.

/// INSERTPS - Insert any element of a 4 x float vector into any element
/// of a destination 4 x float vector.
/// PINSRB - Insert the lower 8 bits of a 32-bit value into a vector,
/// corresponds to X86::PINSRB.

/// PINSRW - Insert the lower 16 bits of a 32-bit value into a vector,
/// corresponds to X86::PINSRW.

/// PSHUFB - Shuffle 16 8-bit values within a vector.

/// FMAX, FMIN - Floating point max and min.

/// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
/// approximation. Note that these typically require refinement
/// in order to obtain suitable precision.
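///
/// A minimal sketch of the usual refinement (standard Newton-Raphson
/// identities, not something defined in this file): given a hardware
/// estimate x0 for input a,
///   rsqrt: x1 = x0 * (1.5 - 0.5 * a * x0 * x0)
///   rcp:   x1 = x0 * (2.0 - a * x0)
/// Each such step roughly doubles the number of correct bits.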
// TLSADDR - Thread Local Storage.

// SegmentBaseAddress - The address segment:0

// EH_RETURN - Exception Handling helpers.

/// TC_RETURN - Tail call return.
///   operand #0 chain
///   operand #1 callee (register or absolute)
///   operand #2 stack adjustment
///   operand #3 optional in flag

// LCMPXCHG_DAG, LCMPXCHG8_DAG - Compare and swap.

// ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
// ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
// Atomic 64-bit binary operations.
// FNSTCW16m - Store FP control word into i16 memory.

// VZEXT_MOVL - Vector move low and zero extend.

// VZEXT_LOAD - Load, scalar_to_vector, and zero extend.

// VSHL, VSRL - Vector logical left / right shift.

// CMPPD, CMPPS - Vector double/float comparison.
// PCMP* - Vector integer comparisons.
PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ,

// ADD, SUB, SMUL, UMUL, etc. - Arithmetic operations with FLAGS results.
ADD, SUB, SMUL, UMUL,

// MUL_IMM - X86 specific multiply by immediate.
/// Define some predicates that are used for node matching.

/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFD.
bool isPSHUFDMask(ShuffleVectorSDNode *N);
/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
bool isPSHUFHWMask(ShuffleVectorSDNode *N);
/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
bool isPSHUFLWMask(ShuffleVectorSDNode *N);
/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
bool isSHUFPMask(ShuffleVectorSDNode *N);

/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
bool isMOVHLPSMask(ShuffleVectorSDNode *N);
/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
/// <2, 3, 2, 3>.
bool isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N);
/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for MOVLP{S|D}.
bool isMOVLPMask(ShuffleVectorSDNode *N);
/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for MOVHP{S|D},
/// as well as MOVLHPS.
bool isMOVHPMask(ShuffleVectorSDNode *N);
/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
bool isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat = false);

/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
bool isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat = false);
/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>.
bool isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N);

/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
/// <2, 2, 3, 3>.
bool isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N);
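
// A minimal illustrative sketch (not part of this interface): the interleave
// pattern the UNPCKL/UNPCKH predicates above are matching. The helper name
// buildUnpackMask is hypothetical and exists only for exposition; for a
// 4-element vector it produces <0, 4, 1, 5> (low) or <2, 6, 3, 7> (high).
inline void buildUnpackMask(bool Lo, unsigned NumElts,
                            SmallVectorImpl<int> &Mask) {
  for (unsigned i = 0, e = NumElts / 2; i != e; ++i) {
    unsigned Src = Lo ? i : i + NumElts / 2;
    Mask.push_back(Src);            // element taken from the first operand
    Mask.push_back(Src + NumElts);  // matching element from the second operand
  }
}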
/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
bool isMOVLMask(ShuffleVectorSDNode *N);

/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
bool isMOVSHDUPMask(ShuffleVectorSDNode *N);

/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
bool isMOVSLDUPMask(ShuffleVectorSDNode *N);

/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVDDUP.
bool isMOVDDUPMask(ShuffleVectorSDNode *N);
/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUF* and
/// SHUFP* instructions.
unsigned getShuffleSHUFImmediate(SDNode *N);

/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUFHW
/// instruction.
unsigned getShufflePSHUFHWImmediate(SDNode *N);

/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUFLW
/// instruction.
unsigned getShufflePSHUFLWImmediate(SDNode *N);
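
// A minimal illustrative sketch (not part of this interface) of how the
// helpers above pack a 4-lane shuffle mask into an 8-bit immediate:
// destination lane i selects source lane (Imm >> (2*i)) & 3, so the identity
// mask <0,1,2,3> encodes as 0xE4 and the reverse mask <3,2,1,0> as 0x1B. The
// name encodeSHUFImmediate is hypothetical and used only for exposition.
inline unsigned encodeSHUFImmediate(const int Mask[4]) {
  unsigned Imm = 0;
  for (unsigned i = 0; i != 4; ++i)
    Imm |= (unsigned)(Mask[i] & 3) << (2 * i);  // two bits per destination lane
  return Imm;
}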
//===--------------------------------------------------------------------===//
//  X86TargetLowering - X86 Implementation of the TargetLowering interface
class X86TargetLowering : public TargetLowering {
  int VarArgsFrameIndex;            // FrameIndex for start of varargs area.
  int RegSaveFrameIndex;            // X86-64 vararg func register save area.
  unsigned VarArgsGPOffset;         // X86-64 vararg func int reg offset.
  unsigned VarArgsFPOffset;         // X86-64 vararg func fp reg offset.
  int BytesToPopOnReturn;           // Number of arg bytes ret should pop.
  int BytesCallerReserves;          // Number of arg bytes caller makes.

  explicit X86TargetLowering(X86TargetMachine &TM);
/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDValue getPICJumpTableRelocBase(SDValue Table,
                                 SelectionDAG &DAG) const;
// Return the number of bytes that a function should pop when it returns (in
// addition to the space used by the return address).
unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; }

// Return the number of bytes that the caller reserves for arguments passed
// to this function.
unsigned getBytesCallerReserves() const { return BytesCallerReserves; }

/// getStackPtrReg - Return the stack pointer register we are using: either
/// ESP or RSP.
unsigned getStackPtrReg() const { return X86StackPtr; }
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
virtual unsigned getByValTypeAlignment(const Type *Ty) const;
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. It returns MVT::iAny if SelectionDAG should be responsible for
/// determining it.
MVT getOptimalMemOpType(uint64_t Size, unsigned Align,
                        bool isSrcConst, bool isSrcStr,
                        SelectionDAG &DAG) const;
/// LowerOperation - Provide custom lowering hooks for some operations.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);

/// ReplaceNodeResults - Replace the results of a node with an illegal result
/// type with new values built out of custom code.
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                SelectionDAG &DAG);

virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
                                                       MachineBasicBlock *MBB) const;
/// getTargetNodeName - This method returns the name of a target specific
/// DAG node.
virtual const char *getTargetNodeName(unsigned Opcode) const;

/// getSetCCResultType - Return the ISD::SETCC ValueType.
virtual MVT getSetCCResultType(MVT VT) const;

/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                            const APInt &Mask,
                                            APInt &KnownZero,
                                            APInt &KnownOne,
                                            const SelectionDAG &DAG,
                                            unsigned Depth = 0) const;
virtual bool
isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const;

SDValue getReturnAddressFrameIndex(SelectionDAG &DAG);

ConstraintType getConstraintType(const std::string &Constraint) const;

std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT VT) const;

virtual const char *LowerXConstraint(MVT ConstraintVT) const;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops. If hasMemory is
/// true it means one of the asm constraints of the inline asm instruction
/// being processed is 'm'.
virtual void LowerAsmOperandForConstraint(SDValue Op,
                                          char ConstraintLetter,
                                          bool hasMemory,
                                          std::vector<SDValue> &Ops,
                                          SelectionDAG &DAG) const;
/// getRegForInlineAsmConstraint - Given a physical register constraint
/// (e.g. {edx}), return the register number and the register class for the
/// register. This should only be used for C_Register constraints. On
/// error, this returns a register number of 0.
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT VT) const;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;
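// Illustrative note on isLegalAddressingMode above (not taken from this
// file): an x86 address has the general form disp(base, index, scale), where
// the hardware encodes scale factors of 1, 2, 4, or 8. For example, an
// AddrMode with BaseOffs = -20, HasBaseReg = true and Scale = 4 corresponds
// to -20(%ebx,%ecx,4) and should be accepted, while a scale with no x86
// encoding (e.g. 7) should be rejected.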
/// isTruncateFree - Return true if it's free to truncate a value of
/// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
/// register EAX to i16 by referencing its sub-register AX.
virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const;
virtual bool isTruncateFree(MVT VT1, MVT VT2) const;
/// isZExtFree - Return true if any actual instruction that defines a
/// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
/// register. This does not necessarily include registers defined in
/// unknown ways, such as incoming arguments, or copies from unknown
/// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
/// does not necessarily apply to truncate instructions. e.g. on x86-64,
/// all instructions that define 32-bit values implicitly zero-extend the
/// result out to 64 bits.
virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const;
virtual bool isZExtFree(MVT VT1, MVT VT2) const;
/// isNarrowingProfitable - Return true if it's profitable to narrow
/// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
/// from i32 to i8 but not from i32 to i16.
virtual bool isNarrowingProfitable(MVT VT1, MVT VT2) const;

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask
/// values are assumed to be legal.
virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
                                MVT VT) const;
/// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
/// this to indicate whether there is a suitable VECTOR_SHUFFLE that can be
/// used to replace a VAND with a constant pool entry.
virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                    MVT VT) const;
/// ShouldShrinkFPConstant - If true, then instruction selection should
/// seek to shrink the FP constant of the specified type to a smaller type
/// in order to save space and / or reduce runtime.
virtual bool ShouldShrinkFPConstant(MVT VT) const {
  // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
  // expensive than a straight movsd. On the other hand, it's important to
  // shrink long double fp constant since fldt is very slow.
  return !X86ScalarSSEf64 || VT == MVT::f80;
}
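// Illustrative note on ShouldShrinkFPConstant above (an assumption, not
// stated in this file): "shrinking" means storing an f64 constant that is
// exactly representable as f32 (i.e. (double)(float)C == C, e.g. 1.5) as a
// 4-byte constant-pool entry and extending it on load; with SSE2 that
// extend (cvtss2sd) usually costs more than loading the 8-byte constant
// with movsd, hence the check above.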
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets that want to do tail call
/// optimization should implement this function.
virtual bool IsEligibleForTailCallOptimization(CallSDNode *TheCall,
                                               SelectionDAG &DAG) const;

virtual const X86Subtarget* getSubtarget() {
  return Subtarget;
}
/// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
/// computed in an SSE register, not on the X87 floating point stack.
bool isScalarFPTypeInSSEReg(MVT VT) const {
  return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is used when SSE2 is available
         (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is used when SSE1 is available
}
/// getWidenVectorType: given a vector type, returns the type to widen
/// to (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
/// If there is no vector type that we want to widen to, returns MVT::Other.
/// When and where to widen is target dependent, based on the cost of
/// scalarizing vs using the wider vector type.
virtual MVT getWidenVectorType(MVT VT) const;
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
virtual FastISel *
createFastISel(MachineFunction &mf,
               MachineModuleInfo *mmi, DwarfWriter *dw,
               DenseMap<const Value *, unsigned> &,
               DenseMap<const BasicBlock *, MachineBasicBlock *> &,
               DenseMap<const AllocaInst *, int> &,
               SmallSet<Instruction*, 8> &);

/// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *F) const;
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
const X86RegisterInfo *RegInfo;
const TargetData *TD;

/// X86StackPtr - X86 physical register used as stack ptr.
unsigned X86StackPtr;

/// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
/// floating point ops.
/// When SSE is available, use it for f32 operations.
/// When SSE2 is available, use it for f64 operations.
bool X86ScalarSSEf32;
bool X86ScalarSSEf64;
SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
                        unsigned CallingConv, SelectionDAG &DAG);

SDValue LowerMemArgument(SDValue Op, SelectionDAG &DAG,
                         const CCValAssign &VA, MachineFrameInfo *MFI,
                         unsigned CC, SDValue Root, unsigned i);

SDValue LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
                         const SDValue &StackPtr,
                         const CCValAssign &VA, SDValue Chain,
                         SDValue Arg, ISD::ArgFlagsTy Flags);

// Call lowering helpers.
bool IsCalleePop(bool isVarArg, unsigned CallingConv);
SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                SDValue Chain, bool IsTailCall, bool Is64Bit,
                                int FPDiff, DebugLoc dl);

CCAssignFn *CCAssignFnForNode(unsigned CallingConv) const;
NameDecorationStyle NameDecorationForFORMAL_ARGUMENTS(SDValue Op);
unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG &DAG);
std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                           bool IsSigned);
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG);
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG);
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG);
SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG);
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG);
SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG);
SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG);
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG);
SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                           int64_t Offset, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG);
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG);
SDValue LowerShift(SDValue Op, SelectionDAG &DAG);
SDValue BuildFILD(SDValue Op, MVT SrcVT, SDValue Chain, SDValue StackSlot,
                  SelectionDAG &DAG);
SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG);
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG);
SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG);
SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG);
SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG);
SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG);
SDValue LowerFABS(SDValue Op, SelectionDAG &DAG);
SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG);
SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG);
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG);
SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG);
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG);
SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG);
SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG);
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG);
SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG);
SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG);
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG);
SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG);
SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG);
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG);
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG);
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG);
SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG);
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG);
SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG);
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG);
SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG);
SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG);
SDValue LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG);
SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG);

SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG);
SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG);
SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG);
void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG, unsigned NewOp);

SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
                                SDValue Dst, SDValue Src,
                                SDValue Size, unsigned Align,
                                const Value *DstSV, uint64_t DstSVOff);
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
                                SDValue Dst, SDValue Src,
                                SDValue Size, unsigned Align,
                                const Value *DstSV, uint64_t DstSVOff,
                                const Value *SrcSV, uint64_t SrcSVOff);
/// Utility function to emit atomic bitwise operations (and, or, xor).
/// It takes the bitwise instruction to expand, the associated machine basic
/// block, and the associated X86 opcodes for reg/reg and reg/imm.
MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter(
                   MachineInstr *BInstr,
                   MachineBasicBlock *BB,
                   TargetRegisterClass *RC,
                   bool invSrc = false) const;

MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(
                   MachineInstr *BInstr,
                   MachineBasicBlock *BB,
                   bool invSrc = false) const;
/// Utility function to emit atomic min and max. It takes the min/max
/// instruction to expand, the associated basic block, and the associated
/// cmov opcode for moving the min or max value.
MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr,
                                                      MachineBasicBlock *BB,
                                                      unsigned cmovOpc) const;

/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent, for use with the given x86 condition code.
SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG);
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent, for use with the given x86 condition code.
SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                SelectionDAG &DAG);
FastISel *createFastISel(MachineFunction &mf,
                         MachineModuleInfo *mmi, DwarfWriter *dw,
                         DenseMap<const Value *, unsigned> &,
                         DenseMap<const BasicBlock *, MachineBasicBlock *> &,
                         DenseMap<const AllocaInst *, int> &,
                         SmallSet<Instruction*, 8> &);
#endif    // X86ISELLOWERING_H