//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
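  // Illustratively, under the large code model this expands to a movz plus
  // three movk instructions using the :abs_g3:, :abs_g2_nc:, :abs_g1_nc: and
  // :abs_g0_nc: relocation specifiers.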
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
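  // ADRP and ADDlow together implement the usual small-code-model address
  // computation, e.g.: adrp x0, sym ; add x0, x0, :lo12:sym.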
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.
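  // ISA semantics of the conditional selects, for reference:
  //   CSEL:  Rd = cond ? Rn : Rm         CSINV: Rd = cond ? Rn : ~Rm
  //   CSNEG: Rd = cond ? Rn : -Rm        CSINC: Rd = cond ? Rn : Rm + 1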

  // Pointer to the thread's local storage area, materialised from TPIDR_EL0.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,
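  // These evaluate chained comparisons without branches: e.g. CCMP performs
  // the comparison if 'cc' holds for the incoming flags, and otherwise sets
  // NZCV directly to the immediate 'falsecc' value.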

  // Floating point comparison
  FCMP,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,
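  // BSL follows the NEON bsl instruction: each result bit comes from the
  // first value operand where the corresponding mask bit is set, and from
  // the second where it is clear; roughly (mask & a) | (~mask & b).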

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,
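  // E.g. scvtf d0, d0 / ucvtf d0, d0: the integer source is already in an
  // FP/SIMD register, avoiding a round trip through the GPRs.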

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t. vectors, which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,
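  // For example, reinterpreting v4i32 as v2i64: NVCAST is a no-op on the
  // register contents, whereas a big-endian ISD::BITCAST would need REV
  // instructions to model the change in memory layout.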

  SMULL,
  UMULL,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(Type *LoadedType,
                     unsigned &RequiredAligment) const override;
  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
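  // Factors 2 to 4 map onto the NEON ld2/ld3/ld4 and st2/st3/st4 structured
  // load/store instructions, which the two hooks below lower to.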

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS) const override;
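  // For example, [x0, x1, lsl #3] is a legal mode for an i64 load (the scale
  // matches the access size), whereas arbitrary scales generally are not.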

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this
  /// method returns true; otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
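  // (AArch64 has fused multiply-add instructions: fmadd/fmsub, and fmla/fmls
  // for vectors, so fusing fmuladd is generally profitable here.)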

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;
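  // On AArch64 these emit the exclusive-access pair (ldxr/ldaxr and
  // stxr/stlxr, depending on the required ordering), from which the expanded
  // atomic operations are built.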

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

private:
  bool isExtFreeImpl(const Instruction *Ext) const override;

  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                       SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                          SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
                          bool isThisReturn, SDValue ThisVal) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      bool isCalleeStructRet, bool isCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo *MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  bool IsTailCallConvention(CallingConv::ID CallCC) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
                      SelectionDAG &DAG) const override;

  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, SDLoc DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, SDLoc dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        std::vector<SDNode *> *Created) const override;
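  // Emits a branchless signed-division-by-power-of-2 sequence; e.g. for
  // x sdiv 8 (illustrative):
  //   add  w8, w0, #7
  //   cmp  w0, #0
  //   csel w8, w8, w0, lt
  //   asr  w0, w8, #3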
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char* RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    // followed by llvm_unreachable so we'll leave them unimplemented in
    // the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;
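  // Pre/post-indexed forms correspond to writeback addressing, e.g.
  // ldr x0, [x1, #8]! (pre-indexed) and ldr x0, [x1], #8 (post-indexed).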

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif