//===-- ARMTargetTransformInfo.h - ARM specific TTI -------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the ARM target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H

#include "ARMTargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Target/TargetLowering.h"
#include <utility>
28 class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
29 typedef BasicTTIImplBase<ARMTTIImpl> BaseT;
30 typedef TargetTransformInfo TTI;
33 const ARMSubtarget *ST;
34 const ARMTargetLowering *TLI;
36 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
37 /// are set if the result needs to be inserted and/or extracted from vectors.
38 unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract);
40 const ARMSubtarget *getST() const { return ST; }
41 const ARMTargetLowering *getTLI() const { return TLI; }
44 explicit ARMTTIImpl(const ARMBaseTargetMachine *TM, const Function &F)
45 : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
46 TLI(ST->getTargetLowering()) {}
48 // Provide value semantics. MSVC requires that we spell all of these out.
49 ARMTTIImpl(const ARMTTIImpl &Arg)
50 : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
51 ARMTTIImpl(ARMTTIImpl &&Arg)
52 : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
53 TLI(std::move(Arg.TLI)) {}
55 bool enableInterleavedAccessVectorization() { return true; }
57 /// \name Scalar TTI Implementations
60 using BaseT::getIntImmCost;
61 int getIntImmCost(const APInt &Imm, Type *Ty);
65 /// \name Vector TTI Implementations
68 unsigned getNumberOfRegisters(bool Vector) {
75 if (ST->isThumb1Only())
80 unsigned getRegisterBitWidth(bool Vector) {
90 unsigned getMaxInterleaveFactor(unsigned VF) {
91 // These are out of order CPUs:
92 if (ST->isCortexA15() || ST->isSwift())
97 int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
99 int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
101 int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
103 int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
105 int getAddressComputationCost(Type *Val, bool IsComplex);
107 int getFPOpCost(Type *Ty);
109 int getArithmeticInstrCost(
110 unsigned Opcode, Type *Ty,
111 TTI::OperandValueKind Op1Info = TTI::OK_AnyValue,
112 TTI::OperandValueKind Op2Info = TTI::OK_AnyValue,
113 TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
114 TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
116 int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
117 unsigned AddressSpace);
119 int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
120 ArrayRef<unsigned> Indices, unsigned Alignment,
121 unsigned AddressSpace);
125 } // end namespace llvm