X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86Subtarget.h;h=eb587a5761396378e05c3ace256e4b7a6bd43a33;hb=7fd324a31fbfd237f43d38d3a780a19fbf909ba3;hp=6841c5bafa32fab96a8ccaaef5be427be1be5aa7;hpb=a20e1e7ef596842127794372244fd5c646f71296;p=oota-llvm.git

diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h
index 6841c5bafa3..eb587a57613 100644
--- a/lib/Target/X86/X86Subtarget.h
+++ b/lib/Target/X86/X86Subtarget.h
@@ -14,8 +14,8 @@
 #ifndef X86SUBTARGET_H
 #define X86SUBTARGET_H
 
-#include "llvm/CallingConv.h"
 #include "llvm/ADT/Triple.h"
+#include "llvm/IR/CallingConv.h"
 #include "llvm/Target/TargetSubtargetInfo.h"
 #include <string>
 
@@ -118,6 +118,9 @@ protected:
   /// HasBMI2 - Processor has BMI2 instructions.
   bool HasBMI2;
 
+  /// HasRTM - Processor has RTM instructions.
+  bool HasRTM;
+
   /// IsBTMemSlow - True if BT (bit test) of memory instructions are slow.
   bool IsBTMemSlow;
 
@@ -136,9 +139,17 @@ protected:
   /// the stack pointer. This is an optimization for Intel Atom processors.
   bool UseLeaForSP;
 
+  /// HasSlowDivide - True if smaller divides are significantly faster than
+  /// full divides and should be used when possible.
+  bool HasSlowDivide;
+
   /// PostRAScheduler - True if using post-register-allocation scheduler.
   bool PostRAScheduler;
 
+  /// PadShortFunctions - True if the short functions should be padded to prevent
+  /// a stall when returning too early.
+  bool PadShortFunctions;
+
   /// stackAlignment - The minimum alignment known to hold of the stack frame on
   /// entry to the function and which must be maintained by every function.
   unsigned stackAlignment;
@@ -183,7 +194,20 @@ public:
   /// instruction.
   void AutoDetectSubtargetFeatures();
 
-  bool is64Bit() const { return In64BitMode; }
+  /// Is this x86_64? (disregarding specific ABI / programming model)
+  bool is64Bit() const {
+    return In64BitMode;
+  }
+
+  /// Is this x86_64 with the ILP32 programming model (x32 ABI)?
+  bool isTarget64BitILP32() const {
+    return In64BitMode && (TargetTriple.getEnvironment() == Triple::GNUX32);
+  }
+
+  /// Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
+  bool isTarget64BitLP64() const {
+    return In64BitMode && (TargetTriple.getEnvironment() != Triple::GNUX32);
+  }
 
   PICStyles::Style getPICStyle() const { return PICStyle; }
   void setPICStyle(PICStyles::Style Style) { PICStyle = Style; }
@@ -198,6 +222,8 @@ public:
   bool hasSSE42() const { return X86SSELevel >= SSE42; }
   bool hasAVX() const { return X86SSELevel >= AVX; }
   bool hasAVX2() const { return X86SSELevel >= AVX2; }
+  bool hasFp256() const { return hasAVX(); }
+  bool hasInt256() const { return hasAVX2(); }
   bool hasSSE4A() const { return HasSSE4A; }
   bool has3DNow() const { return X863DNowLevel >= ThreeDNow; }
   bool has3DNowA() const { return X863DNowLevel >= ThreeDNowA; }
@@ -205,7 +231,8 @@ public:
   bool hasAES() const { return HasAES; }
   bool hasPCLMUL() const { return HasPCLMUL; }
   bool hasFMA() const { return HasFMA; }
-  bool hasFMA4() const { return HasFMA4; }
+  // FIXME: Favor FMA when both are enabled. Is this the right thing to do?
+  bool hasFMA4() const { return HasFMA4 && !HasFMA; }
   bool hasXOP() const { return HasXOP; }
   bool hasMOVBE() const { return HasMOVBE; }
   bool hasRDRAND() const { return HasRDRAND; }
@@ -214,11 +241,14 @@ public:
   bool hasLZCNT() const { return HasLZCNT; }
   bool hasBMI() const { return HasBMI; }
   bool hasBMI2() const { return HasBMI2; }
+  bool hasRTM() const { return HasRTM; }
   bool isBTMemSlow() const { return IsBTMemSlow; }
   bool isUnalignedMemAccessFast() const { return IsUAMemFast; }
   bool hasVectorUAMem() const { return HasVectorUAMem; }
   bool hasCmpxchg16b() const { return HasCmpxchg16b; }
   bool useLeaForSP() const { return UseLeaForSP; }
+  bool hasSlowDivide() const { return HasSlowDivide; }
+  bool padShortFunctions() const { return PadShortFunctions; }
 
   bool isAtom() const { return X86ProcFamily == IntelAtom; }
 
@@ -231,13 +261,13 @@ public:
   bool isTargetSolaris() const {
     return TargetTriple.getOS() == Triple::Solaris;
   }
-
-  // ELF is a reasonably sane default and the only other X86 targets we
-  // support are Darwin and Windows. Just use "not those".
-  bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
+  bool isTargetELF() const {
+    return (TargetTriple.getEnvironment() == Triple::ELF ||
+            TargetTriple.isOSBinFormatELF());
+  }
   bool isTargetLinux() const { return TargetTriple.getOS() == Triple::Linux; }
   bool isTargetNaCl() const {
-    return TargetTriple.getOS() == Triple::NativeClient;
+    return TargetTriple.getOS() == Triple::NaCl;
   }
   bool isTargetNaCl32() const { return isTargetNaCl() && !is64Bit(); }
   bool isTargetNaCl64() const { return isTargetNaCl() && is64Bit(); }
@@ -245,7 +275,10 @@ public:
   bool isTargetMingw() const { return TargetTriple.getOS() == Triple::MinGW32; }
   bool isTargetCygwin() const { return TargetTriple.getOS() == Triple::Cygwin; }
   bool isTargetCygMing() const { return TargetTriple.isOSCygMing(); }
-  bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
+  bool isTargetCOFF() const {
+    return (TargetTriple.getEnvironment() != Triple::ELF &&
+            TargetTriple.isOSBinFormatCOFF());
+  }
   bool isTargetEnvMacho() const { return TargetTriple.isEnvironmentMachO(); }
 
   bool isTargetWin64() const {
@@ -295,12 +328,10 @@ public:
   /// memset with zero passed as the second argument. Otherwise it
   /// returns null.
   const char *getBZeroEntry() const;
-
-  /// getSpecialAddressLatency - For targets where it is beneficial to
-  /// backschedule instructions that compute addresses, return a value
-  /// indicating the number of scheduling cycles of backscheduling that
-  /// should be attempted.
-  unsigned getSpecialAddressLatency() const;
+
+  /// This function returns true if the target has sincos() routine in its
+  /// compiler runtime or math libraries.
+  bool hasSinCos() const;
 
   /// enablePostRAScheduler - run for Atom optimization.
   bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
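
For context, and not part of the patch itself: a minimal sketch of how backend code that already holds a reference to the X86Subtarget might consult the 64-bit-mode predicates added above. The helper name selectPointerSize is hypothetical and exists only for illustration.

#include "X86Subtarget.h"

// Illustrative only: derive a pointer width from the subtarget's ABI
// predicates. Under the x32 ABI the CPU runs in 64-bit mode but the
// programming model is ILP32, so is64Bit() alone is not enough.
static unsigned selectPointerSize(const llvm::X86Subtarget &ST) {
  if (ST.isTarget64BitLP64())
    return 64; // classic AMD64: 64-bit pointers
  if (ST.isTarget64BitILP32())
    return 32; // x32: 64-bit mode, 32-bit pointers
  return 32;   // plain 32-bit mode
}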