//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H
18 #include "ARMSubtarget.h"
19 #include "llvm/Target/TargetLowering.h"
20 #include "llvm/Target/TargetRegisterInfo.h"
21 #include "llvm/CodeGen/FastISel.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/CodeGen/CallingConvLower.h"

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.
      CMOV,         // ARM conditional move instructions.
      CNEG,         // ARM conditional negate instructions.

      BCC_i64,

      RBIT,         // ARM bitreverse instruction

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.
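                    // Illustrative example: with incoming carry C and 32-bit
                    // value X, RRX produces (C << 31) | (X >> 1), and bit 0
                    // of X becomes the new carry, i.e. a 33-bit rotate right
                    // through the carry flag.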

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,   // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,  // SjLj exception handling longjmp.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER,   // Memory barrier
      SYNCBARRIER,  // Memory sync barrier

      VCEQ,         // Vector compare equal.
      VCGE,         // Vector compare greater than or equal.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,
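                    // (Illustrative assumption: e.g. a v8i8 BUILD_VECTOR has
                    // i8 operands, which are not legal scalar types on ARM,
                    // so the ARM-specific node carries them in
                    // already-legalized i32 form.)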

      // Floating-point max and min:
      FMAX,
      FMIN
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    /// getVFPf32Imm / getVFPf64Imm - If the given fp immediate can be
    /// materialized with a VMOV.f32 / VMOV.f64 (i.e. fconsts / fconstd)
    /// instruction, returns its 8-bit integer representation. Otherwise,
    /// returns -1.
    int getVFPf32Imm(const APFloat &FPImm);
    int getVFPf64Imm(const APFloat &FPImm);
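    // Illustrative example: values such as 1.0 and -2.0 fit the 8-bit VFP
    // immediate encoding and yield a non-negative encoding, while a value
    // like 0.1, which has no exact short encoding, yields -1.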
    bool isBitFieldInvertedMask(unsigned v);
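    // Illustrative example: 0xFFFF00FF qualifies because its complement,
    // 0x0000FF00, is a single contiguous run of ones (the shape matched
    // when forming BFC/BFI bitfield instructions).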
  }

  //===--------------------------------------------------------------------===//
  // ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual unsigned getJumpTableEncoding(void) const;

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;
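    // Illustrative note: on subtargets with ARMv6+ unaligned-access support,
    // this is expected to return true for scalar types like i16 and i32,
    // while vector types still take the aligned path.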

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;
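    // Illustrative example: in ARM mode, immediates expressible as an 8-bit
    // value rotated right by an even amount are free (e.g. 0xFF, 0xFF00),
    // whereas a value like 0x101 would first have to be materialized into a
    // register.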

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;
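    // Illustrative example: the pre-indexed form "ldr r0, [r1, #4]!" writes
    // the updated base back before the access, while the post-indexed form
    // "ldr r0, [r1], #4" uses the old base and updates it afterwards.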

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;
    std::vector<unsigned>
      getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                        EVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual TargetRegisterClass *getRegClassFor(EVT VT) const;

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;
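    // Illustrative example: a result of 1 means 2-byte alignment (typical
    // for Thumb functions), and 2 means 4-byte alignment (typical for
    // ARM-mode functions).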

    /// getMaximalGlobalOffset - Returns the maximal possible offset which can
    /// be used for loads / stores from the global.
    virtual unsigned getMaximalGlobalOffset() const;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    unsigned getRegPressureLimit(const TargetRegisterClass *RC,
                                 MachineFunction &MF) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
      findRepresentativeClass(EVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
    void addDRTypeForNEON(EVT VT);
    void addQRTypeForNEON(EVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVector<SDValue, 8> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 DebugLoc dl) const;

    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg,
                bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, DebugLoc dl) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
  };

  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
}

#endif  // ARMISELLOWERING_H