X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FMachineCSE.cpp;h=b5c39958240f6093a0cb149661ee96d1886fbd1d;hb=adb294d28413587e0718a6130e8cf90472cda21e;hp=dbc41defeb5a4b0ab2ac1e6a909e338e723e207f;hpb=bb8ddc7e4ff1f06c7289865bbf9e1fa4632d1c8e;p=oota-llvm.git

diff --git a/lib/CodeGen/MachineCSE.cpp b/lib/CodeGen/MachineCSE.cpp
index dbc41defeb5..b5c39958240 100644
--- a/lib/CodeGen/MachineCSE.cpp
+++ b/lib/CodeGen/MachineCSE.cpp
@@ -15,17 +15,17 @@
 
 #define DEBUG_TYPE "machine-cse"
 #include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/ScopedHashTable.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/RecyclingAllocator.h"
+#include "llvm/Target/TargetInstrInfo.h"
 using namespace llvm;
 
 STATISTIC(NumCoalesces, "Number of copies coalesced");
@@ -49,9 +49,9 @@ namespace {
       initializeMachineCSEPass(*PassRegistry::getPassRegistry());
     }
 
-    virtual bool runOnMachineFunction(MachineFunction &MF);
+    bool runOnMachineFunction(MachineFunction &MF) override;
 
-    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+    void getAnalysisUsage(AnalysisUsage &AU) const override {
       AU.setPreservesCFG();
       MachineFunctionPass::getAnalysisUsage(AU);
       AU.addRequired<AliasAnalysis>();
@@ -60,7 +60,7 @@ namespace {
       AU.addPreserved<MachineDominatorTree>();
     }
 
-    virtual void releaseMemory() {
+    void releaseMemory() override {
       ScopeMap.clear();
       Exps.clear();
     }
@@ -84,11 +84,11 @@ namespace {
     bool hasLivePhysRegDefUses(const MachineInstr *MI,
                               const MachineBasicBlock *MBB,
                               SmallSet<unsigned,8> &PhysRefs,
-                              SmallVector<unsigned,2> &PhysDefs,
+                              SmallVectorImpl<unsigned> &PhysDefs,
                               bool &PhysUseDef) const;
     bool PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
                           SmallSet<unsigned,8> &PhysRefs,
-                          SmallVector<unsigned,2> &PhysDefs,
+                          SmallVectorImpl<unsigned> &PhysDefs,
                           bool &NonLocal) const;
     bool isCSECandidate(MachineInstr *MI);
     bool isProfitableToCSE(unsigned CSReg, unsigned Reg,
@@ -126,16 +126,29 @@ bool MachineCSE::PerformTrivialCoalescing(MachineInstr *MI,
       // deleted.
       continue;
     MachineInstr *DefMI = MRI->getVRegDef(Reg);
-    if (DefMI->getParent() != MBB)
-      continue;
     if (!DefMI->isCopy())
       continue;
     unsigned SrcReg = DefMI->getOperand(1).getReg();
     if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
       continue;
-    if (DefMI->getOperand(0).getSubReg() || DefMI->getOperand(1).getSubReg())
+    if (DefMI->getOperand(0).getSubReg())
+      continue;
+    // FIXME: We should trivially coalesce subregister copies to expose CSE
+    // opportunities on instructions with truncated operands (see
+    // cse-add-with-overflow.ll). This can be done here as follows:
+    // if (SrcSubReg)
+    //   RC = TRI->getMatchingSuperRegClass(MRI->getRegClass(SrcReg), RC,
+    //                                      SrcSubReg);
+    // MO.substVirtReg(SrcReg, SrcSubReg, *TRI);
+    //
+    // The 2-addr pass has been updated to handle coalesced subregs. However,
+    // some machine-specific code still can't handle it.
+    // To handle it properly we also need a way to find a constrained
+    // subregister class given a super-reg class and subreg index.
+    if (DefMI->getOperand(1).getSubReg())
       continue;
-    if (!MRI->constrainRegClass(SrcReg, MRI->getRegClass(Reg)))
+    const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+    if (!MRI->constrainRegClass(SrcReg, RC))
       continue;
     DEBUG(dbgs() << "Coalescing: " << *DefMI);
     DEBUG(dbgs() << "*** to: " << *MI);
@@ -195,7 +208,7 @@ MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
 bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
                                        const MachineBasicBlock *MBB,
                                        SmallSet<unsigned,8> &PhysRefs,
-                                       SmallVector<unsigned,2> &PhysDefs,
+                                       SmallVectorImpl<unsigned> &PhysDefs,
                                        bool &PhysUseDef) const{
   // First, add all uses to PhysRefs.
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -216,7 +229,7 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
   // Next, collect all defs into PhysDefs. If any is already in PhysRefs
   // (which currently contains only uses), set the PhysUseDef flag.
   PhysUseDef = false;
-  MachineBasicBlock::const_iterator I = MI; I = llvm::next(I);
+  MachineBasicBlock::const_iterator I = MI; I = std::next(I);
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI->getOperand(i);
     if (!MO.isReg() || !MO.isDef())
@@ -246,7 +259,7 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
 
 bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
                                   SmallSet<unsigned,8> &PhysRefs,
-                                  SmallVector<unsigned,2> &PhysDefs,
+                                  SmallVectorImpl<unsigned> &PhysDefs,
                                   bool &NonLocal) const {
   // For now conservatively returns false if the common subexpression is
   // not in the same basic block as the given instruction. The only exception
@@ -267,7 +280,7 @@ bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
     }
     CrossMBB = true;
   }
-  MachineBasicBlock::const_iterator I = CSMI; I = llvm::next(I);
+  MachineBasicBlock::const_iterator I = CSMI; I = std::next(I);
   MachineBasicBlock::const_iterator E = MI;
   MachineBasicBlock::const_iterator EE = CSMBB->end();
   unsigned LookAheadLeft = LookAheadLimit;
@@ -312,8 +325,8 @@ bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
 }
 
 bool MachineCSE::isCSECandidate(MachineInstr *MI) {
-  if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
-      MI->isKill() || MI->isInlineAsm() || MI->isDebugValue())
+  if (MI->isPosition() || MI->isPHI() || MI->isImplicitDef() || MI->isKill() ||
+      MI->isInlineAsm() || MI->isDebugValue())
     return false;
 
   // Ignore copies.
@@ -515,7 +528,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
     bool DoCSE = true;
     unsigned NumDefs = MI->getDesc().getNumDefs() +
                        MI->getDesc().getNumImplicitDefs();
-    
+
     for (unsigned i = 0, e = MI->getNumOperands(); NumDefs && i != e; ++i) {
       MachineOperand &MO = MI->getOperand(i);
       if (!MO.isReg() || !MO.isDef())
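
The FIXME added in PerformTrivialCoalescing above explains how subregister copies could eventually be coalesced as well. The sketch below expands that comment only for illustration; it is not part of this patch. It assumes the TargetRegisterInfo/MachineRegisterInfo APIs this file already uses (getMatchingSuperRegClass, MachineOperand::substVirtReg, constrainRegClass, clearKillFlags), and the helper name coalesceCopyUse and its parameter list are hypothetical.

// Hypothetical helper expanding the FIXME above -- not part of the patch.
// Rewrites a use operand MO, which currently reads Reg (defined by the copy
// DefMI), to read the copy source directly, even when the source carries a
// subregister index.
static bool coalesceCopyUse(MachineOperand &MO, unsigned Reg,
                            MachineInstr *DefMI, MachineRegisterInfo *MRI,
                            const TargetRegisterInfo *TRI) {
  unsigned SrcReg = DefMI->getOperand(1).getReg();
  unsigned SrcSubReg = DefMI->getOperand(1).getSubReg();
  const TargetRegisterClass *RC = MRI->getRegClass(Reg);
  // For a subregister copy, pick a class of SrcReg's registers whose
  // SrcSubReg subregister lies in RC, as the FIXME suggests.
  if (SrcSubReg)
    RC = TRI->getMatchingSuperRegClass(MRI->getRegClass(SrcReg), RC, SrcSubReg);
  if (!RC || !MRI->constrainRegClass(SrcReg, RC))
    return false;                            // cannot coalesce safely
  MO.substVirtReg(SrcReg, SrcSubReg, *TRI);  // Reg -> SrcReg[:SrcSubReg]
  MRI->clearKillFlags(SrcReg);
  return true;
}

As the comment notes, the remaining obstacle is that some machine-specific code still cannot handle coalesced subregs and there is no general way to constrain a subregister class from a super-reg class and subreg index, which is why the patch itself only coalesces full-register copies.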