#include "X86TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
-#include "llvm/PassManager.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
return make_unique<TargetLoweringObjectFileMachO>();
}
- if (TT.isOSLinux())
- return make_unique<X86LinuxTargetObjectFile>();
+ if (TT.isOSLinux() || TT.isOSNaCl())
+ return make_unique<X86LinuxNaClTargetObjectFile>();
if (TT.isOSBinFormatELF())
- return make_unique<TargetLoweringObjectFileELF>();
+ return make_unique<X86ELFTargetObjectFile>();
if (TT.isKnownWindowsMSVCEnvironment())
return make_unique<X86WindowsTargetObjectFile>();
if (TT.isOSBinFormatCOFF())
// The stack is aligned to 32 bits on some ABIs and 128 bits on others.
if (!TT.isArch64Bit() && TT.isOSWindows())
- Ret += "-S32";
+ Ret += "-a:0:32-S32";
else
Ret += "-S128";
StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ : LLVMTargetMachine(T, computeDataLayout(Triple(TT)), TT, CPU, FS, Options,
+ RM, CM, OL),
TLOF(createTLOF(Triple(getTargetTriple()))),
- DL(computeDataLayout(Triple(TT))),
Subtarget(TT, CPU, FS, *this, Options.StackAlignmentOverride) {
- // default to hard float ABI
- if (Options.FloatABIType == FloatABI::Default)
- this->Options.FloatABIType = FloatABI::Hard;
-
// Windows stack unwinder gets confused when execution flow "falls through"
// after a call to 'noreturn' function.
// To prevent that, we emit a trap for 'unreachable' IR instructions.
if (Subtarget.isTargetWin64())
this->Options.TrapUnreachable = true;
+ // TODO: By default, all reciprocal estimate operations are off because
+ // that matches the behavior before TargetRecip was added (except for btver2
+ // which used subtarget features to enable this type of codegen).
+ // We should change this to match GCC behavior where everything but
+ // scalar division estimates are turned on by default with -ffast-math.
+ this->Options.Reciprocals.setDefaults("all", false, 1);
+
initAsmInfo();
}
const X86Subtarget *
X86TargetMachine::getSubtargetImpl(const Function &F) const {
- AttributeSet FnAttrs = F.getAttributes();
- Attribute CPUAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-cpu");
- Attribute FSAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-features");
+ Attribute CPUAttr = F.getFnAttribute("target-cpu");
+ Attribute FSAttr = F.getFnAttribute("target-features");
std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
? CPUAttr.getValueAsString().str()
// function before we can generate a subtarget. We also need to use
// it as a key for the subtarget since that can be the only difference
// between two functions.
- Attribute SFAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "use-soft-float");
- bool SoftFloat = !SFAttr.hasAttribute(Attribute::None)
- ? SFAttr.getValueAsString() == "true"
- : Options.UseSoftFloat;
-
- auto &I = SubtargetMap[CPU + FS + (SoftFloat ? "use-soft-float=true"
- : "use-soft-float=false")];
+ bool SoftFloat =
+ F.hasFnAttribute("use-soft-float") &&
+ F.getFnAttribute("use-soft-float").getValueAsString() == "true";
+ // If the soft float attribute is set on the function turn on the soft float
+ // subtarget feature.
+ if (SoftFloat)
+ FS += FS.empty() ? "+soft-float" : ",+soft-float";
+
+ auto &I = SubtargetMap[CPU + FS];
if (!I) {
// This needs to be done before we create a new subtarget since any
// creation will depend on the TM and the code generation flags on the
return getTM<X86TargetMachine>();
}
- const X86Subtarget &getX86Subtarget() const {
- return *getX86TargetMachine().getSubtargetImpl();
- }
-
void addIRPasses() override;
bool addInstSelector() override;
bool addILPOpts() override;
+ bool addPreISel() override;
void addPreRegAlloc() override;
void addPostRegAlloc() override;
void addPreEmitPass() override;
+ void addPreSched2() override;
};
} // namespace
addPass(createX86ISelDag(getX86TargetMachine(), getOptLevel()));
// For ELF, cleanup any local-dynamic TLS accesses.
- if (getX86Subtarget().isTargetELF() && getOptLevel() != CodeGenOpt::None)
+ if (Triple(TM->getTargetTriple()).isOSBinFormatELF() &&
+ getOptLevel() != CodeGenOpt::None)
addPass(createCleanupLocalDynamicTLSPass());
addPass(createX86GlobalBaseRegPass());
return true;
}
+bool X86PassConfig::addPreISel() {
+  // Only add this pass for 32-bit x86 Windows.
+  // NOTE(review): createX86WinEHStatePass presumably sets up Windows EH
+  // (SEH) state for 32-bit x86 before instruction selection -- confirm
+  // against the pass implementation; the guard below only shows it is
+  // restricted to the x86 (not x86_64) arch on OS Windows.
+  Triple TT(TM->getTargetTriple());
+  if (TT.isOSWindows() && TT.getArch() == Triple::x86)
+    addPass(createX86WinEHStatePass());
+  return true;
+}
+
void X86PassConfig::addPreRegAlloc() {
  // Schedule the X86 call-frame optimization at the pre-register-allocation
  // point of the codegen pipeline (hook name is the only timing guarantee
  // visible here; see createX86CallFrameOptimization for what it folds).
  addPass(createX86CallFrameOptimization());
}
addPass(createX86FloatingPointStackifierPass());
}
+// Run the X86 pseudo-instruction expansion pass at the pre-second-scheduling
+// point -- presumably so post-RA scheduling sees real machine opcodes; TODO
+// confirm against X86ExpandPseudo's own documentation.
+void X86PassConfig::addPreSched2() { addPass(createX86ExpandPseudoPass()); }
+
void X86PassConfig::addPreEmitPass() {
- if (getOptLevel() != CodeGenOpt::None && getX86Subtarget().hasSSE2())
+ if (getOptLevel() != CodeGenOpt::None)
addPass(createExecutionDependencyFixPass(&X86::VR128RegClass));
if (UseVZeroUpper)