From e9256e340bfde926e8da41c9794907c7a586c997 Mon Sep 17 00:00:00 2001
From: Matthias Braun
Date: Thu, 11 Dec 2014 19:42:09 +0000
Subject: [PATCH] Enable machineverifier in debug mode for X86, ARM, AArch64, Mips

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@224043 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/AArch64/AArch64TargetMachine.cpp | 10 +++++-----
 lib/Target/ARM/ARMTargetMachine.cpp         | 20 ++++++++++----------
 lib/Target/Mips/MipsTargetMachine.cpp       |  4 ++--
 lib/Target/X86/X86TargetMachine.cpp         |  6 +++---
 4 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/lib/Target/AArch64/AArch64TargetMachine.cpp b/lib/Target/AArch64/AArch64TargetMachine.cpp
index 188a975d59c..d4f19d2abd8 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -270,7 +270,7 @@ bool AArch64PassConfig::addILPOpts() {
 void AArch64PassConfig::addPreRegAlloc() {
   // Use AdvSIMD scalar instructions whenever profitable.
   if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
-    addPass(createAArch64AdvSIMDScalar(), false);
+    addPass(createAArch64AdvSIMDScalar());
     // The AdvSIMD pass may produce copies that can be rewritten to
     // be register coaleascer friendly.
     addPass(&PeepholeOptimizerID);
@@ -280,7 +280,7 @@ void AArch64PassConfig::addPreRegAlloc() {
 void AArch64PassConfig::addPostRegAlloc() {
   // Change dead register definitions to refer to the zero register.
   if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
-    addPass(createAArch64DeadRegisterDefinitions(), false);
+    addPass(createAArch64DeadRegisterDefinitions());
   if (TM->getOptLevel() != CodeGenOpt::None &&
       (TM->getSubtarget().isCortexA53() ||
        TM->getSubtarget().isCortexA57()) &&
@@ -291,7 +291,7 @@ void AArch64PassConfig::addPostRegAlloc() {
 
 void AArch64PassConfig::addPreSched2() {
   // Expand some pseudo instructions to allow proper scheduling.
-  addPass(createAArch64ExpandPseudoPass(), false);
+  addPass(createAArch64ExpandPseudoPass());
   // Use load/store pair instructions when possible.
   if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
     addPass(createAArch64LoadStoreOptimizationPass());
@@ -299,10 +299,10 @@ void AArch64PassConfig::addPreSched2() {
 
 void AArch64PassConfig::addPreEmitPass() {
   if (EnableA53Fix835769)
-    addPass(createAArch64A53Fix835769(), false);
+    addPass(createAArch64A53Fix835769());
   // Relax conditional branch instructions if they're otherwise out of
   // range of their destination.
-  addPass(createAArch64BranchRelaxation(), false);
+  addPass(createAArch64BranchRelaxation());
   if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
       TM->getSubtarget().isTargetMachO())
     addPass(createAArch64CollectLOHPass());
diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp
index 1af622cfdd7..6e198a7d3e0 100644
--- a/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/lib/Target/ARM/ARMTargetMachine.cpp
@@ -243,9 +243,9 @@ bool ARMPassConfig::addInstSelector() {
 
 void ARMPassConfig::addPreRegAlloc() {
   if (getOptLevel() != CodeGenOpt::None)
-    addPass(createARMLoadStoreOptimizationPass(true), false);
+    addPass(createARMLoadStoreOptimizationPass(true));
   if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9())
-    addPass(createMLxExpansionPass(), false);
+    addPass(createMLxExpansionPass());
   // Since the A15SDOptimizer pass can insert VDUP instructions, it can only be
   // enabled when NEON is available.
   if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA15() &&
@@ -256,23 +256,23 @@ void ARMPassConfig::addPreRegAlloc() {
 
 void ARMPassConfig::addPreSched2() {
   if (getOptLevel() != CodeGenOpt::None) {
-    addPass(createARMLoadStoreOptimizationPass(), false);
+    addPass(createARMLoadStoreOptimizationPass());
 
     if (getARMSubtarget().hasNEON())
-      addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass), false);
+      addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
   }
 
   // Expand some pseudo instructions into multiple instructions to allow
   // proper scheduling.
-  addPass(createARMExpandPseudoPass(), false);
+  addPass(createARMExpandPseudoPass());
 
   if (getOptLevel() != CodeGenOpt::None) {
     if (!getARMSubtarget().isThumb1Only()) {
       // in v8, IfConversion depends on Thumb instruction widths
       if (getARMSubtarget().restrictIT() &&
           !getARMSubtarget().prefers32BitThumb())
-        addPass(createThumb2SizeReductionPass(), false);
-      addPass(&IfConverterID, false);
+        addPass(createThumb2SizeReductionPass());
+      addPass(&IfConverterID);
     }
   }
   if (getARMSubtarget().isThumb2())
@@ -282,12 +282,12 @@ void ARMPassConfig::addPreSched2() {
 void ARMPassConfig::addPreEmitPass() {
   if (getARMSubtarget().isThumb2()) {
     if (!getARMSubtarget().prefers32BitThumb())
-      addPass(createThumb2SizeReductionPass(), false);
+      addPass(createThumb2SizeReductionPass());
 
     // Constant island pass work on unbundled instructions.
-    addPass(&UnpackMachineBundlesID, false);
+    addPass(&UnpackMachineBundlesID);
   }
 
-  addPass(createARMOptimizeBarriersPass(), false);
+  addPass(createARMOptimizeBarriersPass());
   addPass(createARMConstantIslandPass());
 }
diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp
index bee1cd062d7..4a9889f14cf 100644
--- a/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/lib/Target/Mips/MipsTargetMachine.cpp
@@ -226,7 +226,7 @@ void MipsTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
 // print out the code after the passes.
 void MipsPassConfig::addPreEmitPass() {
   MipsTargetMachine &TM = getMipsTargetMachine();
-  addPass(createMipsDelaySlotFillerPass(TM), false);
-  addPass(createMipsLongBranchPass(TM), false);
+  addPass(createMipsDelaySlotFillerPass(TM));
+  addPass(createMipsLongBranchPass(TM));
   addPass(createMipsConstantIslandPass(TM));
 }
diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp
index ce18761aaea..5e6aa7d3dbf 100644
--- a/lib/Target/X86/X86TargetMachine.cpp
+++ b/lib/Target/X86/X86TargetMachine.cpp
@@ -193,13 +193,13 @@ void X86PassConfig::addPostRegAlloc() {
 
 void X86PassConfig::addPreEmitPass() {
   if (getOptLevel() != CodeGenOpt::None && getX86Subtarget().hasSSE2())
-    addPass(createExecutionDependencyFixPass(&X86::VR128RegClass), false);
+    addPass(createExecutionDependencyFixPass(&X86::VR128RegClass));
 
   if (UseVZeroUpper)
-    addPass(createX86IssueVZeroUpperPass(), false);
+    addPass(createX86IssueVZeroUpperPass());
 
   if (getOptLevel() != CodeGenOpt::None) {
-    addPass(createX86PadShortFunctions(), false);
+    addPass(createX86PadShortFunctions());
     addPass(createX86FixupLEAs());
   }
 }
-- 
2.34.1
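Note on the mechanism, separate from the patch itself: TargetPassConfig::addPass takes an optional boolean after the pass argument that says whether the machine verifier should be scheduled after that pass, and that parameter defaults to true. The change therefore just drops the explicit false, letting the default re-enable post-pass verification wherever verification is active at all (debug builds, or -verify-machineinstrs). The following is a minimal standalone C++ mock of that default-argument pattern; the names addPass and VerifyAfter here are stand-ins for illustration, not quotes of the real LLVM signatures.

// Standalone mock (not LLVM code): shows why removing the explicit `false`
// argument is enough to turn verification back on via the default value.
#include <iostream>
#include <string>

// Stand-in for the machine verifier being scheduled after a pass.
static void verifyMachineFunction(const std::string &AfterPass) {
  std::cout << "  [machine verifier] run after " << AfterPass << "\n";
}

// Stand-in for TargetPassConfig::addPass(Pass *, bool VerifyAfter = true).
static void addPass(const std::string &PassName, bool VerifyAfter = true) {
  std::cout << "scheduling " << PassName << "\n";
  if (VerifyAfter) // default path taken once the explicit false is gone
    verifyMachineFunction(PassName);
}

int main() {
  // Before the patch: verification explicitly suppressed for this pass.
  addPass("AArch64ExpandPseudo", /*VerifyAfter=*/false);
  // After the patch: the default (true) applies, so the verifier runs.
  addPass("AArch64ExpandPseudo");
  return 0;
}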