//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H

#include "X86Subtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"

namespace llvm {
  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END+X86::INSTRUCTION_LIST_END,

      /// SHLD, SHRD - Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.
      SHLD,
      SHRD,

      /// FAND - Bitwise logical AND of floating point values. This corresponds
      /// to X86::ANDPS or X86::ANDPD.
      FAND,

      /// FOR - Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.
      FOR,

      /// FXOR - Bitwise logical XOR of floating point values. This corresponds
      /// to X86::XORPS or X86::XORPD.
      FXOR,

      /// FSRL - Bitwise logical right shift of floating point values. This
      /// corresponds to X86::PSRLDQ.
      FSRL,

      /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result. This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain). FILD_FLAG
      /// also produces a flag.
      FILD,
      FILD_FLAG,

      /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source. This corresponds
      /// to the X86::FIST*m instructions and the rounding mode change stuff. It
      /// has two inputs (token chain and address) and two outputs (int value
      /// and token chain).
      FP_TO_INT16_IN_MEM,
      FP_TO_INT32_IN_MEM,
      FP_TO_INT64_IN_MEM,

      /// FLD - This instruction implements an extending load to FP stack slots.
      /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
      /// operand, ptr to load from, and a ValueType node indicating the type
      /// to load.
      FLD,

      /// FST - This instruction implements a truncating store to FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
      /// chain operand, value to store, address, and a ValueType to store it
      /// as.
      FST,

      /// FP_GET_RESULT - This corresponds to the FpGETRESULT pseudo instruction
      /// which copies from ST(0) to the destination. It takes a chain and
      /// writes an RFP result and a chain.
      FP_GET_RESULT,

      /// FP_SET_RESULT - This corresponds to the FpSETRESULT pseudo instruction
      /// which copies the source operand to ST(0). It takes a chain+value and
      /// returns a chain and a flag.
      FP_SET_RESULT,

      /// CALL/TAILCALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular the
      /// operands of these nodes are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
      ///
      /// The CALL vs TAILCALL distinction boils down to whether the callee is
      /// known not to modify the caller's stack frame, as is standard with
      /// LLVM's tail call support.
      CALL,
      TAILCALL,
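
      // Illustrative sketch only: given the operand layout above, a call that
      // passes 12 bytes of arguments which the callee does not pop would carry
      // operands roughly of the form (Chain, Callee, Constant<12>, Constant<0>),
      // and produce (output chain, register result values...) as its results.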

      /// RDTSC_DAG - This operation implements the lowering for
      /// readcyclecounter.
      RDTSC_DAG,

      /// X86 compare and logical compare instructions.
      CMP, TEST, COMI, UCOMI,

      /// X86 SetCC. Operand 1 is condition code, and operand 2 is the flag
      /// operand produced by a CMP instruction.
      SETCC,

      /// X86 conditional moves. Operand 1 and operand 2 are the two values
      /// to select from (operand 1 is a R/W operand). Operand 3 is the
      /// condition code, and operand 4 is the flag operand produced by a CMP
      /// or TEST instruction. It also writes a flag result.
      CMOV,

      /// X86 conditional branches. Operand 1 is the chain operand, operand 2
      /// is the block to branch to if the condition is true, operand 3 is the
      /// condition code, and operand 4 is the flag operand produced by a CMP
      /// or TEST instruction.
      BRCOND,

      /// Return with a flag operand. Operand 1 is the chain operand, operand
      /// 2 is the number of bytes of stack to pop.
      RET_FLAG,

      /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
      REP_STOS,

      /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
      REP_MOVS,

      /// LOAD_PACK - Load a 128-bit packed float / double value. It has the
      /// same operands as a normal load.
      LOAD_PACK,

      /// LOAD_UA - Load an unaligned 128-bit value. It has the same operands
      /// as a normal load.
      LOAD_UA,

      /// GlobalBaseReg - On Darwin, this node represents the result of the popl
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// Wrapper - A wrapper node for TargetConstantPool,
      /// TargetExternalSymbol, and TargetGlobalAddress.
      Wrapper,

      /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      WrapperRIP,

      /// S2VEC - X86 version of SCALAR_TO_VECTOR. The destination base type
      /// does not have to match the operand type.
      S2VEC,

      /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.
      PEXTRW,

      /// PINSRW - Insert the lower 16-bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRW.
      PINSRW,

      /// FMAX, FMIN - Floating point max and min.
      FMAX, FMIN
    };
  }

  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFD.
    bool isPSHUFDMask(SDNode *N);

    /// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFHW.
    bool isPSHUFHWMask(SDNode *N);

    /// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFLW.
    bool isPSHUFLWMask(SDNode *N);

    /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to SHUFP*.
    bool isSHUFPMask(SDNode *N);

    /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
    bool isMOVHLPSMask(SDNode *N);

    /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
    /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
    /// <2, 3, 2, 3>.
    bool isMOVHLPS_v_undef_Mask(SDNode *N);

    /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
    bool isMOVLPMask(SDNode *N);

    /// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
    /// as well as MOVLHPS.
    bool isMOVHPMask(SDNode *N);

    /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKL.
    bool isUNPCKLMask(SDNode *N, bool V2IsSplat = false);

    /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKH.
    bool isUNPCKHMask(SDNode *N, bool V2IsSplat = false);

    /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
    /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
    /// <0, 0, 1, 1>.
    bool isUNPCKL_v_undef_Mask(SDNode *N);

    /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSS,
    /// MOVSD, and MOVD, i.e. setting the lowest element.
    bool isMOVLMask(SDNode *N);

    /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
    bool isMOVSHDUPMask(SDNode *N);

    /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
    bool isMOVSLDUPMask(SDNode *N);

    /// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a splat of a single element.
    bool isSplatMask(SDNode *N);

    /// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a splat of element zero.
    bool isSplatLoMask(SDNode *N);

    /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
    /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
    /// instructions.
    unsigned getShuffleSHUFImmediate(SDNode *N);

    /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
    /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
    /// instructions.
    unsigned getShufflePSHUFHWImmediate(SDNode *N);

    /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
    /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
    /// instructions.
    unsigned getShufflePSHUFLWImmediate(SDNode *N);
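
    // Illustrative usage sketch: instruction selection typically tests a
    // VECTOR_SHUFFLE mask node against one of the predicates above and, on a
    // match, encodes it with the corresponding getShuffle*Immediate helper:
    //   if (X86::isPSHUFDMask(Mask.Val))
    //     Imm = X86::getShuffleSHUFImmediate(Mask.Val);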
  }

  //===--------------------------------------------------------------------===//
  //  X86TargetLowering - X86 Implementation of the TargetLowering interface
  class X86TargetLowering : public TargetLowering {
    int VarArgsFrameIndex;            // FrameIndex for start of varargs area.
    int RegSaveFrameIndex;            // X86-64 vararg func register save area.
    unsigned VarArgsGPOffset;         // X86-64 vararg func int reg offset.
    unsigned VarArgsFPOffset;         // X86-64 vararg func fp reg offset.
    int ReturnAddrIndex;              // FrameIndex for return slot.
    int BytesToPopOnReturn;           // Number of arg bytes ret should pop.
    int BytesCallerReserves;          // Number of arg bytes caller makes.
  public:
    X86TargetLowering(TargetMachine &TM);

    // Return the number of bytes that a function should pop when it returns (in
    // addition to the space used by the return address).
    unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; }

    // Return the number of bytes that the caller reserves for arguments passed
    // to this function.
    unsigned getBytesCallerReserves() const { return BytesCallerReserves; }

    /// getStackPtrReg - Return the stack pointer register we are using: either
    /// ESP or RSP.
    unsigned getStackPtrReg() const { return X86StackPtr; }

    /// LowerOperation - Provide custom lowering hooks for some operations.
    virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
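
    /// PerformDAGCombine - Hook called by the DAG combiner; if a
    /// target-specific combine applies to node N, return the replacement
    /// value, otherwise return a null SDOperand.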
    virtual SDOperand PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
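
    /// InsertAtEndOfBasicBlock - Expand a pseudo instruction that requires
    /// custom insertion (for example the conditional-move pseudos) into real
    /// machine instructions, possibly creating new basic blocks.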
    virtual MachineBasicBlock *InsertAtEndOfBasicBlock(MachineInstr *MI,
                                                       MachineBasicBlock *MBB);

    /// getTargetNodeName - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;

    /// computeMaskedBitsForTargetNode - Determine which of the bits specified
    /// in Mask are known to be either zero or one and return them in the
    /// KnownZero/KnownOne bitsets.
    virtual void computeMaskedBitsForTargetNode(const SDOperand Op,
                                                uint64_t Mask,
                                                uint64_t &KnownZero,
                                                uint64_t &KnownOne,
                                                unsigned Depth = 0) const;

    SDOperand getReturnAddressFrameIndex(SelectionDAG &DAG);
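
    /// getConstraintType - Given an inline asm constraint string, classify it
    /// (e.g. register, register class, memory, or other) for this target.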
    ConstraintType getConstraintType(const std::string &Constraint) const;
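
    /// getRegClassForInlineAsmConstraint - Given an inline asm constraint and
    /// a value type, return the set of registers that may be used to satisfy
    /// it; an empty vector means the constraint is not handled this way.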
    std::vector<unsigned>
      getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                        MVT::ValueType VT) const;

    /// isOperandValidForConstraint - Return the specified operand (possibly
    /// modified) if the specified SDOperand is valid for the specified target
    /// constraint letter, otherwise return null.
    SDOperand isOperandValidForConstraint(SDOperand Op, char ConstraintLetter,
                                          SelectionDAG &DAG);

    /// getRegForInlineAsmConstraint - Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for the
    /// register. This should only be used for C_Register constraints. On
    /// error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   MVT::ValueType VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;
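
    // For reference: an X86 memory operand has the general form
    //   Base + Scale*Index + Disp     (Scale in {1, 2, 4, 8}),
    // optionally with a global value as the base, so legality here roughly
    // amounts to matching AM against that pattern.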

    /// isLegalAddressImmediate - Return true if the integer value can be used
    /// as the offset of the target addressing mode for load / store of the
    /// given type.
    virtual bool isLegalAddressImmediate(int64_t V, const Type *Ty) const;

    /// isLegalAddressImmediate - Return true if the GlobalValue can be used as
    /// the offset of the target addressing mode.
    virtual bool isLegalAddressImmediate(GlobalValue *GV) const;

    /// isLegalAddressScale - Return true if the integer value can be used as
    /// the scale of the target addressing mode for load / store of the given
    /// type.
    virtual bool isLegalAddressScale(int64_t S, const Type *Ty) const;

    /// isLegalAddressScaleAndImm - Return true if S works for
    /// isLegalAddressScale and V works for isLegalAddressImmediate _and_
    /// both can be applied simultaneously to the same instruction.
    virtual bool isLegalAddressScaleAndImm(int64_t S, int64_t V,
                                           const Type *Ty) const;

    /// isLegalAddressScaleAndImm - Return true if S works for
    /// isLegalAddressScale and GV works for isLegalAddressImmediate _and_
    /// both can be applied simultaneously to the same instruction.
    virtual bool isLegalAddressScaleAndImm(int64_t S, GlobalValue *GV,
                                           const Type *Ty) const;

    /// isShuffleMaskLegal - Targets can use this to indicate that they only
    /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
    /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
    /// values are assumed to be legal.
    virtual bool isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const;

    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
    /// this to indicate whether there is a suitable VECTOR_SHUFFLE that can be
    /// used to replace a VAND with a constant pool entry.
    virtual bool isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
                                        MVT::ValueType EVT,
                                        SelectionDAG &DAG) const;

  private:
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// X86StackPtr - X86 physical register used as stack ptr.
    unsigned X86StackPtr;

    /// X86ScalarSSE - Select between SSE2 and x87 floating point ops.
    bool X86ScalarSSE;
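
    /// LowerCallResult - Lower the result values of a call into the
    /// appropriate copies out of physical result registers, returning the
    /// node that carries the merged results.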
    SDNode *LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                            unsigned CallingConv, SelectionDAG &DAG);

    // C and StdCall Calling Convention implementation.
    SDOperand LowerCCCArguments(SDOperand Op, SelectionDAG &DAG,
                                bool isStdCall = false);
    SDOperand LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC);

    // X86-64 C Calling Convention implementation.
    SDOperand LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC);

    // Fast and FastCall Calling Convention implementation.
    SDOperand LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC);
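
    // Custom lowering routines for individual operations, dispatched from
    // LowerOperation above.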
    SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerShift(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerFABS(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerFNEG(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG, SDOperand Chain);
    SDOperand LowerSELECT(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerBRCOND(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerMEMSET(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerMEMCPY(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerREADCYCLCECOUNTER(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerVACOPY(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG);
    SDOperand LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG);
  };
}

#endif    // X86ISELLOWERING_H