//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H
#include "ARMSubtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"

namespace llvm {
  class ARMConstantPoolValue;
  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,
      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                    // PIC mode.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      // Add pseudo op to model memcpy for struct byval.
      COPY_STRUCT_BYVAL,
      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.
      INTRET_FLAG,  // Interrupt return with an LR-offset and a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMN,          // ARM CMN instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.

      CMOV,         // ARM conditional move instructions.

      RBIT,         // ARM bitreverse instruction

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.
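      // Illustrative note (an assumption about how these nodes pair up, not
      // part of the original header): a 64-bit logical shift right by one can
      // be emitted as SRL_FLAG on the high word followed by RRX on the low
      // word, e.g.
      //   lsrs r1, r1, #1   @ high word; carry out = old bit 0
      //   rrx  r0, r0       @ low word; carry shifted into bit 31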
      ADDC,         // Add with carry
      ADDE,         // Add using carry
      SUBC,         // Sub with carry
      SUBE,         // Sub using carry

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.
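      // For illustration: VMOVRRD corresponds to "vmov r0, r1, d0" and
      // VMOVDRR to "vmov d0, r0, r1", moving a 64-bit value between a D
      // register and a GPR pair without going through memory.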
      EH_SJLJ_SETJMP,   // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,  // SjLj exception handling longjmp.

      TC_RETURN,    // Tail call return pseudo.

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER_MCR, // Memory barrier (MCR)
      VCEQ,         // Vector compare equal.
      VCEQZ,        // Vector compare equal to zero.
      VCGE,         // Vector compare greater than or equal.
      VCGEZ,        // Vector compare greater than or equal to zero.
      VCLEZ,        // Vector compare less than or equal to zero.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTZ,        // Vector compare greater than zero.
      VCLTZ,        // Vector compare less than zero.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.
      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)
      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right
      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element
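      // For illustration: VGETLANEu maps to "vmov.u8 r0, d0[3]" (zero-extend)
      // and VGETLANEs to "vmov.s8 r0, d0[3]" (sign-extend); 32-bit lanes use
      // the plain VMOV form since no extension is needed.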
      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector move f32 immediate:
      VMOVFPIMM,
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)

      VTBL1,        // 1-register shuffle with mask
      VTBL2,        // 2-register shuffle with mask
      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      UMLAL,        // 64-bit unsigned multiply-accumulate
      SMLAL,        // 64-bit signed multiply-accumulate
      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,
      // Floating-point max and min:
      FMAX,
      FMIN,

      // Vector OR with immediate
      VORRIMM,
      // Vector AND with NOT of immediate
      VBICIMM,

      // Vector bitwise select
      VBSL,

      // Vector load N-element structure to all lanes:
      VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD3DUP,
      VLD4DUP,
      // NEON loads with post-increment base updates:
      VLD1_UPD,
      VLD2_UPD,
      VLD3_UPD,
      VLD4_UPD,
      VLD2LN_UPD,
      VLD3LN_UPD,
      VLD4LN_UPD,
      VLD2DUP_UPD,
      VLD3DUP_UPD,
      VLD4DUP_UPD,
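      // For illustration: the "_UPD" forms model instructions such as
      // "vld1.32 {d0}, [r0]!", which perform the access and also write the
      // incremented base address back to the base register.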
      // NEON stores with post-increment base updates:
      VST1_UPD,
      VST2_UPD,
      VST3_UPD,
      VST4_UPD,
      VST2LN_UPD,
      VST3LN_UPD,
      VST4LN_UPD
    };
  }
  /// Define some predicates that are used for node matching.
  namespace ARM {
    bool isBitFieldInvertedMask(unsigned v);
  }
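  // For illustration: a "bit-field inverted mask" is an all-ones value with a
  // single contiguous run of zeros, e.g. 0xffff00ff; its complement
  // (0x0000ff00) is the contiguous field that BFI/BFC can insert or clear.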
  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual unsigned getJumpTableEncoding() const;

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual bool isSelectSupported(SelectSupportKind Kind) const {
      // ARM does not support scalar condition selects on vectors.
      return (Kind != ScalarCondVectorVal);
    }
    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    virtual void
      AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the last argument.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                               bool *Fast) const;
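    // Illustrative sketch of the policy (an assumption about this hook's
    // behavior, not a statement from the original header): subtargets that
    // permit unaligned memory access (roughly v6+ without strict alignment)
    // report 16- and 32-bit accesses as legal; otherwise this returns false
    // and the access is expanded into smaller aligned operations.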
    virtual EVT getOptimalMemOpType(uint64_t Size,
                                    unsigned DstAlign, unsigned SrcAlign,
                                    bool IsMemset, bool ZeroMemset,
                                    bool MemcpyStrSrc,
                                    MachineFunction &MF) const;

    using TargetLowering::isZExtFree;
    virtual bool isZExtFree(SDValue Val, EVT VT2) const;
    virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate: that is, the target has icmp instructions that
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;
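    // For illustration (values are assumptions based on the A32 modified
    // immediate encoding, an 8-bit value rotated right by an even amount):
    //   icmp eq i32 %x, 0xff0      -> cmp r0, #0xff0  (encodable)
    //   icmp eq i32 %x, 0x12345678 -> the constant must be materialized first
    // Negated immediates can also qualify, since CMN covers them.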
    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate: that is, the target has add instructions that
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalAddImmediate(int64_t Imm) const;
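    // For illustration: "add r0, r1, #0x3fc00" is a single instruction
    // (0xff rotated into place), while adding 0x12345 needs the constant in
    // a register, or a SUB of the negated value when that encodes.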
    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;
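    // For illustration: pre-indexed means the base is updated before the
    // access, as in "ldr r0, [r1, #4]!" (r1 += 4, then load from r1).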
    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;
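    // For illustration: post-indexed means the base is updated after the
    // access, as in "ldr r0, [r1], #4" (load from r1, then r1 += 4).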
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;
    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   MVT VT) const;
    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;
    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }
    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual const TargetRegisterClass *getRegClassFor(MVT VT) const;

    /// getMaximalGlobalOffset - Returns the maximal possible offset which can
    /// be used for loads / stores from the global.
    virtual unsigned getMaximalGlobalOffset() const;
    /// Returns true if a cast between SrcAS and DestAS is a noop.
    virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
      // Addrspacecasts are always noops.
      return true;
    }
    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                                     const TargetLibraryInfo *libInfo) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
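    // For illustration (assumes VFPv3): the 8-bit vmov.f32/.f64 immediate
    // encoding covers values of the form +/-(16..31)/16 * 2^(-3..4), so
    // constants such as 1.0, 0.5, and -2.0 are legal, while 0.1 is not and
    // would be loaded from the constant pool.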
    virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                                    const CallInst &I,
                                    unsigned Intrinsic) const;
    /// \brief Returns true if it is beneficial to convert a load of a constant
    /// to just the constant itself.
    virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                   Type *Ty) const;

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
      findRepresentativeClass(MVT VT) const;
  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    unsigned ARMPCLabelIndex;
    void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
    void addDRTypeForNEON(MVT VT);
    void addQRTypeForNEON(MVT VT);
    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVectorImpl<SDValue> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 SDLoc dl) const;
    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG,
                                 TLSModel::Model model) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;
    SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true, otherwise fmuladd is
    /// expanded to fmul + fadd.
    ///
    /// ARM supports both fused and unfused multiply-add operations; we already
    /// lower a pair of fmul and fadd to the latter so it's not clear that there
    /// would be a gain or that the gain would be worthwhile enough to risk
    /// correctness bugs.
    virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const { return false; }
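    // For illustration: returning false here means @llvm.fmuladd lowers to
    // separate fmul/fadd nodes, which instruction selection can still fold
    // into unfused VMLA where profitable; a fused VFMA is only produced for
    // an explicit @llvm.fma call.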
    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            SDLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals,
                            bool isThisReturn, SDValue ThisVal) const;
    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           SDLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;
    int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                       SDLoc dl, SDValue &Chain,
                       const Value *OrigArg,
                       unsigned InRegsParamRecordIdx,
                       unsigned OffsetFromOrigArg,
                       unsigned ArgOffset,
                       unsigned ArgSize,
                       bool ForceMutable) const;
    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              SDLoc dl, SDValue &Chain,
                              unsigned ArgOffset,
                              bool ForceMutable = false) const;
    void computeRegArea(CCState &CCInfo, MachineFunction &MF,
                        unsigned InRegsParamRecordIdx,
                        unsigned InRegsParamsCount,
                        unsigned &ArgRegsSize,
                        unsigned &ArgRegsSaveSize) const;
    virtual SDValue
      LowerCall(TargetLowering::CallLoweringInfo &CLI,
                SmallVectorImpl<SDValue> &InVals) const;
    /// HandleByVal - Target-specific cleanup for ByVal support.
    virtual void HandleByVal(CCState *, unsigned &, unsigned) const;
    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    virtual bool CanLowerReturn(CallingConv::ID CallConv,
                                MachineFunction &MF, bool isVarArg,
                                const SmallVectorImpl<ISD::OutputArg> &Outs,
                                LLVMContext &Context) const;
    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  SDLoc dl, SelectionDAG &DAG) const;
    virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, SDLoc dl) const;
    SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;
    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
    MachineBasicBlock *EmitAtomicBinary64(MachineInstr *MI,
                                          MachineBasicBlock *BB,
                                          unsigned Op1 = 0,
                                          unsigned Op2 = 0,
                                          bool NeedsCarry = false,
                                          bool IsCmpxchg = false,
                                          bool IsMinMax = false,
                                          ARMCC::CondCodes CC = ARMCC::AL) const;
    MachineBasicBlock *EmitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              bool signExtend,
                                              ARMCC::CondCodes Cond) const;
    MachineBasicBlock *EmitAtomicLoad64(MachineInstr *MI,
                                        MachineBasicBlock *BB) const;

    void SetupEntryBlockForSjLj(MachineInstr *MI,
                                MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;

    MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr *MI,
                                             MachineBasicBlock *MBB) const;

    bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitStructByval(MachineInstr *MI,
                                       MachineBasicBlock *MBB) const;
  };
  enum NEONModImmType {
    VMOVModImm,
    VMVNModImm,
    OtherModImm
  };

  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  }
}
#endif  // ARMISELLOWERING_H