//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H
#include "ARM.h"
#include "ARMSubtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <vector>
namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,
      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                    // PIC mode.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      // Add pseudo op to model memcpy for struct byval.
      COPY_STRUCT_BYVAL,
      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.
      INTRET_FLAG,  // Interrupt return with an LR-offset and a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMN,          // ARM CMN instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.

      CMOV,         // ARM conditional move instructions.
      BCC_i64,

      RBIT,         // ARM bitreverse instruction
      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      ADDC,         // Add with carry
      ADDE,         // Add using carry
      SUBC,         // Sub with carry
      SUBE,         // Sub using carry

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,   // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,  // SjLj exception handling longjmp.
      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER_MCR, // Memory barrier (MCR)

      PRELOAD,      // Preload
      VCEQ,         // Vector compare equal.
      VCEQZ,        // Vector compare equal to zero.
      VCGE,         // Vector compare greater than or equal.
      VCGEZ,        // Vector compare greater than or equal to zero.
      VCLEZ,        // Vector compare less than or equal to zero.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTZ,        // Vector compare greater than zero.
      VCLTZ,        // Vector compare less than zero.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.
      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow
      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)
      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right
      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element
      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector move f32 immediate:
      VMOVFPIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose

      VTBL1,        // 1-register shuffle with mask
      VTBL2,        // 2-register shuffle with mask
      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      UMLAL,        // 64-bit unsigned multiply-accumulate
      SMLAL,        // 64-bit signed multiply-accumulate
      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN,
      VMAXNM,
      VMINNM,

      // Bit-field insert
      BFI,
      // Vector OR with immediate
      VORRIMM,

      // Vector AND with NOT of immediate
      VBICIMM,

      // Vector bitwise select
      VBSL,

      // Vector load N-element structure to all lanes:
      VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD3DUP,
      VLD4DUP,
      // NEON loads with post-increment base updates:
      VLD1_UPD,
      VLD2_UPD,
      VLD3_UPD,
      VLD4_UPD,
      VLD2LN_UPD,
      VLD3LN_UPD,
      VLD4LN_UPD,
      VLD2DUP_UPD,
      VLD3DUP_UPD,
      VLD4DUP_UPD,

      // NEON stores with post-increment base updates:
      VST1_UPD,
      VST2_UPD,
      VST3_UPD,
      VST4_UPD,
      VST2LN_UPD,
      VST3LN_UPD,
      VST4LN_UPD
    };
  }
  /// Define some predicates that are used for node matching.
  namespace ARM {
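    // A sketch of the intended semantics (an assumption, mirroring the
    // BFC/BFI matching this predicate serves): true when ~v is a single
    // contiguous run of set bits, e.g. v = 0xff0000ff (~v = 0x00ffff00)
    // qualifies, while v = 0xff00ff00 (~v = 0x00ff00ff, two runs) does not.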
    bool isBitFieldInvertedMask(unsigned v);
  }
  //===--------------------------------------------------------------------===//
  // ARMTargetLowering - ARM Implementation of the TargetLowering interface
  //===--------------------------------------------------------------------===//

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);
    virtual unsigned getJumpTableEncoding() const;

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;
    virtual bool isSelectSupported(SelectSupportKind Kind) const {
      // ARM does not support scalar condition selects on vectors.
      return (Kind != ScalarCondVectorVal);
    }
    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    virtual void
      AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const;
    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the second argument.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;
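    // Usage sketch (hypothetical caller); legality and speed are reported
    // separately, so clients typically check both:
    //   bool Fast = false;
    //   if (TLI.allowsUnalignedMemoryAccesses(MVT::i32, &Fast) && Fast)
    //     ; // safe to emit the unaligned i32 access directly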
    virtual EVT getOptimalMemOpType(uint64_t Size,
                                    unsigned DstAlign, unsigned SrcAlign,
                                    bool IsMemset, bool ZeroMemset,
                                    bool MemcpyStrSrc,
                                    MachineFunction &MF) const;
    using TargetLowering::isZExtFree;
    virtual bool isZExtFree(SDValue Val, EVT VT2) const;

    virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;
    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
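    // Example query, as a sketch (AddrMode is TargetLowering's struct):
    //   AddrMode AM;          // models base + 4 * index
    //   AM.HasBaseReg = true;
    //   AM.Scale = 4;
    //   isLegalAddressingMode(AM, Ty);  // true where [rN, rM, lsl #2] exists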
    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is, the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalAddImmediate(int64_t Imm) const;
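    // For reference: an ARM-mode data-processing immediate is (roughly) an
    // 8-bit value rotated right by an even amount, so 0xFF and 0xFF000000
    // are cheap here while 0x101 must first be materialized in a register.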
    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;
    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;
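    // The two addressing forms in question, for reference:
    //   pre-indexed:  ldr r0, [r1, #4]!   @ loads from r1+4, r1 = r1+4
    //   post-indexed: ldr r0, [r1], #4    @ loads from r1,   r1 = r1+4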
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;
    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   MVT VT) const;
    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;
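    // e.g. given asm("add %0, %1, %2" : "=r"(d) : "r"(s), "I"(42)), the 'I'
    // operand is only pushed into Ops if 42 encodes as a data-processing
    // immediate; otherwise the constraint is rejected. (Illustrative only;
    // the constant and constraint letter here are arbitrary.)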
    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }
    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual const TargetRegisterClass *getRegClassFor(MVT VT) const;

    /// getMaximalGlobalOffset - Returns the maximal possible offset which can
    /// be used for loads / stores from the global.
    virtual unsigned getMaximalGlobalOffset() const;
    /// Returns true if a cast between SrcAS and DestAS is a noop.
    virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
      // Addrspacecasts are always noops.
      return true;
    }
    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                                     const TargetLibraryInfo *libInfo) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
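    // e.g. with VFPv3, immediates such as 1.0 or -2.0 fit the 8-bit
    // floating-point encoding of VMOV.F32/.F64, while a value like 0.1
    // does not and is instead loaded from the constant pool.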
    virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                                    const CallInst &I,
                                    unsigned Intrinsic) const;
  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
      findRepresentativeClass(MVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
    void addDRTypeForNEON(MVT VT);
    void addQRTypeForNEON(MVT VT);
    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVectorImpl<SDValue> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
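    // Under AAPCS an f64 argument may be split across a GPR pair (and
    // possibly the stack); VA and NextVA are expected to describe the two
    // halves. (A reading of the interface, not a guarantee.)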
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 SDLoc dl) const;
    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG,
                                 TLSModel::Model model) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;
    SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true, otherwise fmuladd is
    /// expanded to fmul + fadd.
    ///
    /// ARM supports both fused and unfused multiply-add operations; we already
    /// lower a pair of fmul and fadd to the latter so it's not clear that there
    /// would be a gain or that the gain would be worthwhile enough to risk
    /// correctness bugs.
    virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const { return false; }
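    // In effect @llvm.fmuladd.* is expanded to fmul + fadd here, which the
    // existing ISel patterns may then combine into an unfused VMLA/VMLS
    // where that is considered profitable.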
    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            SDLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals,
                            bool isThisReturn, SDValue ThisVal) const;
    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           SDLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;
    int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                       SDLoc dl, SDValue &Chain,
                       const Value *OrigArg,
                       unsigned InRegsParamRecordIdx,
                       unsigned OffsetFromOrigArg,
                       unsigned ArgOffset,
                       unsigned ArgSize,
                       bool ForceMutable) const;
    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              SDLoc dl, SDValue &Chain,
                              unsigned ArgOffset,
                              bool ForceMutable = false) const;
    void computeRegArea(CCState &CCInfo, MachineFunction &MF,
                        unsigned InRegsParamRecordIdx,
                        unsigned InRegsParamsCount,
                        unsigned &ArgRegsSize,
                        unsigned &ArgRegsSaveSize) const;
    virtual SDValue
      LowerCall(TargetLowering::CallLoweringInfo &CLI,
                SmallVectorImpl<SDValue> &InVals) const;
    /// HandleByVal - Target-specific cleanup for ByVal support.
    virtual void HandleByVal(CCState *, unsigned &, unsigned) const;
    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                           const SmallVectorImpl<ISD::OutputArg> &Outs,
                                           const SmallVectorImpl<SDValue> &OutVals,
                                           const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG &DAG) const;
    virtual bool CanLowerReturn(CallingConv::ID CallConv,
                                MachineFunction &MF, bool isVarArg,
                                const SmallVectorImpl<ISD::OutputArg> &Outs,
                                LLVMContext &Context) const;
    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  SDLoc dl, SelectionDAG &DAG) const;
    virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, SDLoc dl) const;
    SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;
    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
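    // The expansion is the usual load-exclusive/store-exclusive retry loop,
    // schematically (BinOpcode supplying the middle operation):
    //   loop: ldrex r0, [addr]
    //         <op>  r1, r0, operand
    //         strex r2, r1, [addr]
    //         cmp   r2, #0
    //         bne   loop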
    MachineBasicBlock *EmitAtomicBinary64(MachineInstr *MI,
                                          MachineBasicBlock *BB,
                                          unsigned Op1,
                                          unsigned Op2,
                                          bool NeedsCarry = false,
                                          bool IsCmpxchg = false,
                                          bool IsMinMax = false,
                                          ARMCC::CondCodes CC = ARMCC::AL) const;
    MachineBasicBlock *EmitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              bool IsSigned,
                                              ARMCC::CondCodes Cond) const;
    MachineBasicBlock *EmitAtomicLoad64(MachineInstr *MI,
                                        MachineBasicBlock *BB) const;

    void SetupEntryBlockForSjLj(MachineInstr *MI,
                                MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;

    MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr *MI,
                                             MachineBasicBlock *MBB) const;

    bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitStructByval(MachineInstr *MI,
                                       MachineBasicBlock *MBB) const;
  };
  enum NEONModImmType {
    VMOVModImm,
    VMVNModImm,
    OtherModImm
  };
  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  }
}

#endif  // ARMISELLOWERING_H