diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp
index 0c9e0a47201..91e22dc16fc 100644
--- a/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/lib/Target/ARM/ARMTargetMachine.cpp
@@ -10,8 +10,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "ARMTargetMachine.h"
 #include "ARM.h"
+#include "ARMTargetMachine.h"
 #include "ARMFrameLowering.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/MC/MCAsmInfo.h"
@@ -23,11 +23,6 @@
 #include "llvm/Transforms/Scalar.h"
 using namespace llvm;
 
-static cl::opt<bool>
-EnableGlobalMerge("global-merge", cl::Hidden,
-                  cl::desc("Enable global merge pass"),
-                  cl::init(true));
-
 static cl::opt<bool>
 DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
                          cl::desc("Inhibit optimization of S->D register accesses on A15"),
@@ -35,8 +30,10 @@ DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
 
 extern "C" void LLVMInitializeARMTarget() {
   // Register the target.
-  RegisterTargetMachine<ARMTargetMachine> X(TheARMTarget);
-  RegisterTargetMachine<ThumbTargetMachine> Y(TheThumbTarget);
+  RegisterTargetMachine<ARMLETargetMachine> X(TheARMLETarget);
+  RegisterTargetMachine<ARMBETargetMachine> Y(TheARMBETarget);
+  RegisterTargetMachine<ThumbLETargetMachine> A(TheThumbLETarget);
+  RegisterTargetMachine<ThumbBETargetMachine> B(TheThumbBETarget);
 }
 
 
@@ -46,14 +43,17 @@ ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, StringRef TT,
                                            StringRef CPU, StringRef FS,
                                            const TargetOptions &Options,
                                            Reloc::Model RM, CodeModel::Model CM,
-                                           CodeGenOpt::Level OL)
+                                           CodeGenOpt::Level OL,
+                                           bool isLittle)
   : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
-    Subtarget(TT, CPU, FS, Options),
+    Subtarget(TT, CPU, FS, isLittle, Options),
     JITInfo(),
     InstrItins(Subtarget.getInstrItineraryData()) {
-  // Default to soft float ABI
+
+  // Default to the triple-appropriate float ABI
   if (Options.FloatABIType == FloatABI::Default)
-    this->Options.FloatABIType = FloatABI::Soft;
+    this->Options.FloatABIType =
+        Subtarget.isTargetHardFloat() ? FloatABI::Hard : FloatABI::Soft;
 }
 
 void ARMBaseTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
@@ -68,21 +68,33 @@ void ARMBaseTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
 void ARMTargetMachine::anchor() { }
 
 static std::string computeDataLayout(ARMSubtarget &ST) {
-  // Little endian. Pointers are 32 bits and aligned to 32 bits.
-  std::string Ret = "e-p:32:32";
+  std::string Ret = "";
+
+  if (ST.isLittle())
+    // Little endian.
+    Ret += "e";
+  else
+    // Big endian.
+    Ret += "E";
+
+  Ret += DataLayout::getManglingComponent(ST.getTargetTriple());
+
+  // Pointers are 32 bits and aligned to 32 bits.
+  Ret += "-p:32:32";
 
   // On Thumb, i1, i8 and i16 have natural alignment requirements, but we try
   // to align to 32.
   if (ST.isThumb())
     Ret += "-i1:8:32-i8:8:32-i16:16:32";
 
-  // We have 64 bits floats and integers. The APCS ABI requires them to be
-  // aligned s them to 32 bits, others to 64 bits. We always try to align to
-  // 64 bits.
+  // ABIs other than APCS have 64-bit integers with natural alignment.
+  if (!ST.isAPCS_ABI())
+    Ret += "-i64:64";
+
+  // We have 64-bit floats. The APCS ABI requires them to be aligned to 32
+  // bits, others to 64 bits. We always try to align to 64 bits.
   if (ST.isAPCS_ABI())
     Ret += "-f64:32:64";
-  else
-    Ret += "-i64:64";
 
   // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits,
   // others to 64. We always try to give them natural alignment.
@@ -91,16 +103,19 @@ static std::string computeDataLayout(ARMSubtarget &ST) {
   else
     Ret += "-v128:64:128";
 
-  // An aggregate of size 0 is ABI aligned to 0.
-  // FIXME: explain better what this means.
-  if (ST.isThumb())
+  // On Thumb and APCS, only try to align aggregates to 32 bits (the default
+  // is 64 bits).
+  if (ST.isThumb() || ST.isAPCS_ABI())
     Ret += "-a:0:32";
 
   // Integer registers are 32 bits.
   Ret += "-n32";
 
-  // The stack is 64 bit aligned on AAPCS and 32 bit aligned everywhere else.
-  if (ST.isAAPCS_ABI())
+  // The stack is 128-bit aligned on NaCl, 64-bit aligned on AAPCS and 32-bit
+  // aligned everywhere else.
+  if (ST.isTargetNaCl())
+    Ret += "-S128";
+  else if (ST.isAAPCS_ABI())
     Ret += "-S64";
   else
     Ret += "-S32";
@@ -112,8 +127,9 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT,
                                    StringRef CPU, StringRef FS,
                                    const TargetOptions &Options,
                                    Reloc::Model RM, CodeModel::Model CM,
-                                   CodeGenOpt::Level OL)
-  : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+                                   CodeGenOpt::Level OL,
+                                   bool isLittle)
+  : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle),
     InstrInfo(Subtarget),
     DL(computeDataLayout(Subtarget)),
     TLInfo(*this),
@@ -125,14 +141,33 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT,
            "support ARM mode execution!");
 }
 
+void ARMLETargetMachine::anchor() { }
+
+ARMLETargetMachine::
+ARMLETargetMachine(const Target &T, StringRef TT,
+                   StringRef CPU, StringRef FS, const TargetOptions &Options,
+                   Reloc::Model RM, CodeModel::Model CM,
+                   CodeGenOpt::Level OL)
+  : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
+
+void ARMBETargetMachine::anchor() { }
+
+ARMBETargetMachine::
+ARMBETargetMachine(const Target &T, StringRef TT,
+                   StringRef CPU, StringRef FS, const TargetOptions &Options,
+                   Reloc::Model RM, CodeModel::Model CM,
+                   CodeGenOpt::Level OL)
+  : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
+
 void ThumbTargetMachine::anchor() { }
 
 ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT,
                                        StringRef CPU, StringRef FS,
                                        const TargetOptions &Options,
                                        Reloc::Model RM, CodeModel::Model CM,
-                                       CodeGenOpt::Level OL)
-  : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+                                       CodeGenOpt::Level OL,
+                                       bool isLittle)
+  : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle),
     InstrInfo(Subtarget.hasThumb2()
               ? ((ARMBaseInstrInfo*)new Thumb2InstrInfo(Subtarget))
               : ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))),
@@ -145,6 +180,24 @@ ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT,
   initAsmInfo();
 }
 
+void ThumbLETargetMachine::anchor() { }
+
+ThumbLETargetMachine::
+ThumbLETargetMachine(const Target &T, StringRef TT,
+                     StringRef CPU, StringRef FS, const TargetOptions &Options,
+                     Reloc::Model RM, CodeModel::Model CM,
+                     CodeGenOpt::Level OL)
+  : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
+
+void ThumbBETargetMachine::anchor() { }
+
+ThumbBETargetMachine::
+ThumbBETargetMachine(const Target &T, StringRef TT,
+                     StringRef CPU, StringRef FS, const TargetOptions &Options,
+                     Reloc::Model RM, CodeModel::Model CM,
+                     CodeGenOpt::Level OL)
+  : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
+
 namespace {
 /// ARM Code Generator Pass Configuration Options.
 class ARMPassConfig : public TargetPassConfig {
@@ -160,11 +213,11 @@ public:
     return *getARMTargetMachine().getSubtargetImpl();
   }
 
-  virtual bool addPreISel();
-  virtual bool addInstSelector();
-  virtual bool addPreRegAlloc();
-  virtual bool addPreSched2();
-  virtual bool addPreEmitPass();
+  bool addPreISel() override;
+  bool addInstSelector() override;
+  bool addPreRegAlloc() override;
+  bool addPreSched2() override;
+  bool addPreEmitPass() override;
 };
 } // namespace
 
@@ -173,7 +226,11 @@ TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 bool ARMPassConfig::addPreISel() {
-  if (TM->getOptLevel() != CodeGenOpt::None && EnableGlobalMerge)
+  const ARMSubtarget *Subtarget = &getARMSubtarget();
+  if (Subtarget->hasAnyDataBarrier() && !Subtarget->isThumb1Only())
+    addPass(createAtomicExpandLoadLinkedPass(TM));
+
+  if (TM->getOptLevel() != CodeGenOpt::None)
     addPass(createGlobalMergePass(TM));
 
   return false;
@@ -243,6 +300,7 @@ bool ARMPassConfig::addPreEmitPass() {
     addPass(&UnpackMachineBundlesID);
   }
 
+  addPass(createARMOptimizeBarriersPass());
   addPass(createARMConstantIslandPass());
 
   return true;
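
Editor's note: to make the data-layout changes above concrete, here is a minimal
standalone sketch of the strings this patch's computeDataLayout() builds. The
helper and its boolean parameters are hypothetical stand-ins for the
ARMSubtarget queries (isLittle(), isThumb(), isAPCS_ABI(), isAAPCS_ABI(),
isTargetNaCl()); the "-m:e" component assumes an ELF triple, and the APCS
vector string "-v64:32:64-v128:32:128" is assumed from the if-branch that falls
between the hunks shown above. This is plain C++, not the LLVM API.

    // computeDataLayout_sketch.cpp -- illustrative only, not LLVM code.
    #include <iostream>
    #include <string>

    static std::string computeDataLayout(bool IsLittle, bool IsThumb,
                                         bool IsAPCS, bool IsAAPCS,
                                         bool IsNaCl) {
      std::string Ret = IsLittle ? "e" : "E"; // byte order
      Ret += "-m:e";                          // ELF name mangling (assumed)
      Ret += "-p:32:32";                      // 32-bit pointers, 32-bit aligned
      if (IsThumb)                            // bump i1/i8/i16 alignment to 32
        Ret += "-i1:8:32-i8:8:32-i16:16:32";
      if (!IsAPCS)                            // natural i64 alignment off APCS
        Ret += "-i64:64";
      if (IsAPCS)                             // APCS: f64 may be 32-bit aligned
        Ret += "-f64:32:64";
      Ret += IsAPCS ? "-v64:32:64-v128:32:128" : "-v128:64:128";
      if (IsThumb || IsAPCS)                  // 32-bit aggregate alignment
        Ret += "-a:0:32";
      Ret += "-n32";                          // 32-bit integer registers
      if (IsNaCl)                             // stack alignment
        Ret += "-S128";
      else if (IsAAPCS)
        Ret += "-S64";
      else
        Ret += "-S32";
      return Ret;
    }

    int main() {
      // arm-none-linux-gnueabi (little-endian ARM, AAPCS):
      //   e-m:e-p:32:32-i64:64-v128:64:128-n32-S64
      std::cout << computeDataLayout(true, false, false, true, false) << '\n';
      // armeb-none-linux-gnueabi (big-endian ARM, AAPCS): identical but "E-...".
      std::cout << computeDataLayout(false, false, false, true, false) << '\n';
      // thumbv7-none-linux-gnueabi (little-endian Thumb, AAPCS):
      //   e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-v128:64:128-a:0:32-n32-S64
      std::cout << computeDataLayout(true, true, false, true, false) << '\n';
    }

Running the sketch shows that flipping endianness only changes the leading
"e"/"E"; every other component is driven by the ABI and instruction-set mode,
which is why the patch threads a single isLittle flag down through the
target-machine constructors instead of duplicating the layout logic.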