//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARMSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.
      CMOV,         // ARM conditional move instructions.
      CNEG,         // ARM conditional negate instructions.

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,    // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,   // SjLj exception handling longjmp.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER,   // Memory barrier
      SYNCBARRIER,  // Memory sync barrier

      VCEQ,         // Vector compare equal.
      VCGE,         // Vector compare greater than or equal.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN          // transpose
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    /// getVMOVImm - If this is a build_vector of constants which can be
    /// formed by using a VMOV instruction of the specified element size,
    /// return the constant being splatted. The ByteSize field indicates the
    /// number of bytes of each element [1248].
    SDValue getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
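    // For example (illustrative, not from the original source): a <8 x i8>
    // splat of 1 matches with ByteSize == 1 and can be materialized as
    // "vmov.i8 d0, #0x1".
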
    /// getVFPf32Imm / getVFPf64Imm - If the given fp immediate can be
    /// materialized with a VMOV.f32 / VMOV.f64 (i.e. fconsts / fconstd)
    /// instruction, returns its 8-bit integer representation. Otherwise,
    /// returns -1.
    int getVFPf32Imm(const APFloat &FPImm);
    int getVFPf64Imm(const APFloat &FPImm);
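    // Illustrative example (not from the original source): 1.0f is
    // (-1)^0 * 2^0 * (16+0)/16, which the VFP imm8 format can encode, so
    // getVFPf32Imm(APFloat(1.0f)) returns 0x70 ("vmov.f32 s0, #1.0");
    // a value like 0.1f has no such encoding, so it returns -1.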
  }

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
    int VarArgsFrameIndex;            // FrameIndex for start of varargs area.
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);
    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG);

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
                                                           MachineBasicBlock *MBB,
                       DenseMap<MachineBasicBlock*, MachineBasicBlock*>*) const;
    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;
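    // (Background, not from the original source: ARMv6+ cores allow unaligned
    // ldr/str/ldrh/strh when the SCTLR.A bit is clear, while NEON/VFP and
    // load/store-multiple accesses still require alignment.)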
    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
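    // e.g. an AddrMode of base + (index << 2) is legal for an i32 load, since
    // it folds into a single scaled-register access: "ldr r0, [r1, r2, lsl #2]".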
    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate: that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;
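    // e.g. "cmp r0, #255" encodes directly, while a compare against 257 (not
    // an ARM modified immediate) would first need the constant materialized
    // into a register.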
    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;
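    // Pre-indexed form, e.g. "ldr r0, [r1, #4]!": the offset address is used
    // for the access and written back to the base register.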
    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;
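    // Post-indexed form, e.g. "ldr r0, [r1], #4": the access uses r1 and the
    // incremented address is then written back to r1.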
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;
    std::vector<unsigned>
      getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                        EVT VT) const;
    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              bool hasMemory,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;
    virtual const ARMSubtarget* getSubtarget() {
      return Subtarget;
    }

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;
    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    ///
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
    void addDRTypeForNEON(EVT VT);
    void addQRTypeForNEON(EVT VT);
    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVector<SDValue, 8> &MemOpChains,
                          ISD::ArgFlagsTy Flags);
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG, DebugLoc dl);

    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags);
    SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG);
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG);
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG);
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG);
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG);
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG);
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG);
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG);
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG);
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG);
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG);
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG);
    SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
                                    SDValue Chain,
                                    SDValue Dst, SDValue Src,
                                    SDValue Size, unsigned Align,
                                    bool AlwaysInline,
                                    const Value *DstSV, uint64_t DstSVOff,
                                    const Value *SrcSV, uint64_t SrcSVOff);
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals);
    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals);

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg,
                bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals);

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  DebugLoc dl, SelectionDAG &DAG);

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMCC, SelectionDAG &DAG, DebugLoc dl);
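
    // The two Emit* helpers below expand the atomic pseudo-instructions from
    // EmitInstrWithCustomInserter into ldrex/strex loops spread over new basic
    // blocks. A sketch of the generated pattern (illustrative, not the exact
    // emitted sequence):
    //   loop: ldrex   r2, [r0]        @ load-exclusive the old value
    //         <binop> r3, r2, r1      @ compute the new value
    //         strex   r12, r3, [r0]   @ try to store it exclusively
    //         cmp     r12, #0         @ retry if we lost the reservation
    //         bne     loop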
    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
  };
}

#endif  // ARMISELLOWERING_H