//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARMSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.
      CMOV,         // ARM conditional move instructions.
      CNEG,         // ARM conditional negate instructions.

      RBIT,         // ARM bitreverse instruction.

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,   // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,  // SjLj exception handling longjmp.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER,   // Memory barrier
      SYNCBARRIER,  // Memory sync barrier

      VCEQ,         // Vector compare equal.
      VCGE,         // Vector compare greater than or equal.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN          // transpose
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    /// getVMOVImm - If this is a build_vector of constants which can be
    /// formed by using a VMOV instruction of the specified element size,
    /// return the constant being splatted.  The ByteSize field indicates the
    /// number of bytes of each element (1, 2, 4, or 8).
    SDValue getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);

    /// getVFPf32Imm / getVFPf64Imm - If the given fp immediate can be
    /// materialized with a VMOV.f32 / VMOV.f64 (i.e. fconsts / fconstd)
    /// instruction, returns its 8-bit integer representation.  Otherwise,
    /// returns -1.
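    /// (The imm8 encoding packs a sign bit, a 3-bit exponent, and a 4-bit
    /// fraction, so only a small set of FP constants is representable.)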
    int getVFPf32Imm(const APFloat &FPImm);
    int getVFPf64Imm(const APFloat &FPImm);
  }

  //===--------------------------------------------------------------------===//
  // ARMTargetLowering - ARM Implementation of the TargetLowering interface
  //
  class ARMTargetLowering : public TargetLowering {
    int VarArgsFrameIndex;            // FrameIndex for start of varargs area.
  public:
    explicit ARMTargetLowering(TargetMachine &TM);
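
    /// LowerOperation - Provide custom lowering hooks for the operations this
    /// target has marked as requiring Custom lowering.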
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG);
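
    /// PerformDAGCombine - Target-specific DAG combining hook; invoked for
    /// ARMISD nodes and for nodes registered with setTargetDAGCombine.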
    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
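
    /// getTargetNodeName - Return the name of the given target-specific
    /// (ARMISD) opcode, for use in debug output.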
    virtual const char *getTargetNodeName(unsigned Opcode) const;
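
    /// EmitInstrWithCustomInserter - Expand pseudo-instructions that are
    /// marked for custom insertion (such as the atomic pseudos handled by
    /// EmitAtomicCmpSwap and EmitAtomicBinary below).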
    virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
                                                         MachineBasicBlock *MBB,
                       DenseMap<MachineBasicBlock*, MachineBasicBlock*>*) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM,
                                       const Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as a pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;
    std::vector<unsigned>
      getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                        EVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector.  If it is invalid, don't add anything to Ops.  If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              bool hasMemory,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    virtual const ARMSubtarget* getSubtarget() {
      return Subtarget;
    }

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;
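
    /// isShuffleMaskLegal - Return true if the given VECTOR_SHUFFLE mask is
    /// one the target can select natively (for NEON: VREV, VZIP, VUZP, VTRN,
    /// VEXT, VDUP, and friends).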
    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively.  If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    ///
    unsigned ARMPCLabelIndex;
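
    /// addTypeForNEON / addDRTypeForNEON / addQRTypeForNEON - Register a NEON
    /// vector type with the legalizer (addDR* for 64-bit D registers, addQR*
    /// for 128-bit Q registers) and set up how its loads, stores, and bitwise
    /// operations are promoted.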
    void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
    void addDRTypeForNEON(EVT VT);
    void addQRTypeForNEON(EVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
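
    /// PassF64ArgInRegs / GetF64FormalArgument - Helpers for passing and
    /// receiving an f64 argument that the calling convention splits into a
    /// pair of i32 values (in registers and/or on the stack).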
    void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVector<SDValue, 8> &MemOpChains,
                          ISD::ArgFlagsTy Flags);
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG, DebugLoc dl);
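
    /// CCAssignFnForNode - Select the CCAssignFn to use for the given calling
    /// convention, distinguishing argument from return-value assignment.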
    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags);
    SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG);
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget);
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG);
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG);
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG);
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG);
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG);
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG);
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG);
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG);
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG);
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG);

    SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
                                    SDValue Chain,
                                    SDValue Dst, SDValue Src,
                                    SDValue Size, unsigned Align,
                                    bool AlwaysInline,
                                    const Value *DstSV, uint64_t DstSVOff,
                                    const Value *SrcSV, uint64_t SrcSVOff);
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals);

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals);

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg,
                bool isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals);

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  DebugLoc dl, SelectionDAG &DAG);
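
    /// getARMCmp - Return an ARM comparison node for the given operands,
    /// translating CC into the ARMCC condition-code operand returned in ARMCC.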
    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMCC, SelectionDAG &DAG, DebugLoc dl);
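
    /// EmitAtomicCmpSwap - Expand an atomic compare-and-swap pseudo into an
    /// LDREX/STREX loop; Size is the access width in bytes.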
    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
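
    /// EmitAtomicBinary - Expand an atomic read-modify-write pseudo into an
    /// LDREX/STREX loop; BinOpcode is the ARM opcode used to combine the
    /// loaded value with the operand.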
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
  };
}

#endif // ARMISELLOWERING_H