diff --git a/lib/CodeGen/TargetInstrInfoImpl.cpp b/lib/CodeGen/TargetInstrInfoImpl.cpp
index 5b1d5a62ffb..15340a3f108 100644
--- a/lib/CodeGen/TargetInstrInfoImpl.cpp
+++ b/lib/CodeGen/TargetInstrInfoImpl.cpp
@@ -22,12 +22,18 @@
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PostRAHazardRecognizer.h"
+#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
 using namespace llvm;
 
+static cl::opt<bool> DisableHazardRecognizer(
+  "disable-sched-hazard", cl::Hidden, cl::init(false),
+  cl::desc("Disable hazard detection during preRA scheduling"));
+
 /// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
 /// after it, replacing it with an unconditional branch to NewDest.
 void
@@ -134,7 +140,7 @@ bool TargetInstrInfoImpl::PredicateInstruction(MachineInstr *MI,
   const TargetInstrDesc &TID = MI->getDesc();
   if (!TID.isPredicable())
     return false;
-  
+
   for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
     if (TID.OpInfo[i].isPredicate()) {
       MachineOperand &MO = MI->getOperand(i);
@@ -165,8 +171,10 @@ void TargetInstrInfoImpl::reMaterialize(MachineBasicBlock &MBB,
   MBB.insert(I, MI);
 }
 
-bool TargetInstrInfoImpl::produceSameValue(const MachineInstr *MI0,
-                                           const MachineInstr *MI1) const {
+bool
+TargetInstrInfoImpl::produceSameValue(const MachineInstr *MI0,
+                                      const MachineInstr *MI1,
+                                      const MachineRegisterInfo *MRI) const {
   return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
 }
 
@@ -177,19 +185,6 @@ MachineInstr *TargetInstrInfoImpl::duplicate(MachineInstr *Orig,
   return MF.CloneMachineInstr(Orig);
 }
 
-unsigned
-TargetInstrInfoImpl::GetFunctionSizeInBytes(const MachineFunction &MF) const {
-  unsigned FnSize = 0;
-  for (MachineFunction::const_iterator MBBI = MF.begin(), E = MF.end();
-       MBBI != E; ++MBBI) {
-    const MachineBasicBlock &MBB = *MBBI;
-    for (MachineBasicBlock::const_iterator I = MBB.begin(),E = MBB.end();
-         I != E; ++I)
-      FnSize += GetInstSizeInBytes(I);
-  }
-  return FnSize;
-}
-
 // If the COPY instruction in MI can be folded to a stack operation, return
 // the register class to use.
 static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
@@ -253,51 +248,44 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
   MachineFunction &MF = *MBB->getParent();
 
   // Ask the target to do the actual folding.
-  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI);
-
-  // Straight COPY may fold as load/store.
-  if (!NewMI) {
-    if (!MI->isCopy() || Ops.size() != 1)
-      return 0;
-
-    const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
-    if (!RC)
-      return 0;
-
-    const MachineOperand &MO = MI->getOperand(1-Ops[0]);
-    MachineBasicBlock::iterator Pos = MI;
-    const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+  if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
+    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
+    assert((!(Flags & MachineMemOperand::MOStore) ||
+            NewMI->getDesc().mayStore()) &&
+           "Folded a def to a non-store!");
+    assert((!(Flags & MachineMemOperand::MOLoad) ||
+            NewMI->getDesc().mayLoad()) &&
+           "Folded a use to a non-load!");
+    const MachineFrameInfo &MFI = *MF.getFrameInfo();
+    assert(MFI.getObjectOffset(FI) != -1);
+    MachineMemOperand *MMO =
+      MF.getMachineMemOperand(
+                      MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+                              Flags, MFI.getObjectSize(FI),
+                              MFI.getObjectAlignment(FI));
+    NewMI->addMemOperand(MF, MMO);
 
-    if (Flags == MachineMemOperand::MOStore)
-      storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
-    else
-      loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
-
-    NewMI = --Pos;
-  } else {
     // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
-    NewMI = MBB->insert(MI, NewMI);
+    return MBB->insert(MI, NewMI);
   }
 
-  if (!NewMI) return 0;
+  // Straight COPY may fold as load/store.
+  if (!MI->isCopy() || Ops.size() != 1)
+    return 0;
 
+  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
+  if (!RC)
+    return 0;
 
-  assert((!(Flags & MachineMemOperand::MOStore) ||
-          NewMI->getDesc().mayStore()) &&
-         "Folded a def to a non-store!");
-  assert((!(Flags & MachineMemOperand::MOLoad) ||
-          NewMI->getDesc().mayLoad()) &&
-         "Folded a use to a non-load!");
-  const MachineFrameInfo &MFI = *MF.getFrameInfo();
-  assert(MFI.getObjectOffset(FI) != -1);
-  MachineMemOperand *MMO =
-    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
-                            Flags, /*Offset=*/0,
-                            MFI.getObjectSize(FI),
-                            MFI.getObjectAlignment(FI));
-  NewMI->addMemOperand(MF, MMO);
+  const MachineOperand &MO = MI->getOperand(1-Ops[0]);
+  MachineBasicBlock::iterator Pos = MI;
+  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
 
-  return NewMI;
+  if (Flags == MachineMemOperand::MOStore)
+    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
+  else
+    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
+  return --Pos;
 }
 
 /// foldMemoryOperand - Same as the previous version except it allows folding
@@ -348,8 +336,13 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
   const TargetInstrDesc &TID = MI->getDesc();
 
   // Avoid instructions obviously unsafe for remat.
-  if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable() ||
-      TID.mayStore())
+  if (TID.isNotDuplicable() || TID.mayStore() ||
+      MI->hasUnmodeledSideEffects())
+    return false;
+
+  // Don't remat inline asm. We have no idea how expensive it is
+  // even if it's side effect free.
+  if (MI->isInlineAsm())
     return false;
 
   // Avoid instructions which load from potentially varying memory.
@@ -433,8 +426,24 @@ bool TargetInstrInfoImpl::isSchedulingBoundary(const MachineInstr *MI,
   return false;
 }
 
+// Provide a global flag for disabling the PreRA hazard recognizer that targets
+// may choose to honor.
+bool TargetInstrInfoImpl::usePreRAHazardRecognizer() const {
+  return !DisableHazardRecognizer;
+}
+
+// Default implementation of CreateTargetHazardRecognizer.
+ScheduleHazardRecognizer *TargetInstrInfoImpl::
+CreateTargetHazardRecognizer(const TargetMachine *TM,
+                             const ScheduleDAG *DAG) const {
+  // Dummy hazard recognizer allows all instructions to issue.
+  return new ScheduleHazardRecognizer();
+}
+
 // Default implementation of CreateTargetPostRAHazardRecognizer.
 ScheduleHazardRecognizer *TargetInstrInfoImpl::
-CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const {
-  return (ScheduleHazardRecognizer *)new PostRAHazardRecognizer(II);
+CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
+                                   const ScheduleDAG *DAG) const {
+  return (ScheduleHazardRecognizer *)
+    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
 }
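
A note on the restructured TargetInstrInfo::foldMemoryOperand above: on success, the returned instruction is already inserted into the basic block, and the target-fold path now attaches the frame-index MachineMemOperand itself, while the COPY path leaves memory operands to storeRegToStackSlot/loadRegFromStackSlot. A minimal caller-side sketch follows, assuming a spiller that already knows which operand to fold and which frame index to use; tryFoldStackAccess, OpIdx, and FI are illustrative names, not part of this patch.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

// Try to fold the stack access for operand OpIdx of MI into MI itself.
// Returns the folded instruction (already inserted in the block), or 0
// to tell the caller to emit an explicit store/reload around MI instead.
static MachineInstr *tryFoldStackAccess(const TargetInstrInfo *TII,
                                        MachineInstr *MI, unsigned OpIdx,
                                        int FI) {
  SmallVector<unsigned, 1> Ops;
  Ops.push_back(OpIdx);
  // MI converts implicitly to MachineBasicBlock::iterator in this API.
  if (MachineInstr *FoldedMI = TII->foldMemoryOperand(MI, Ops, FI)) {
    MI->eraseFromParent(); // the original instruction is now dead
    return FoldedMI;
  }
  return 0;
}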
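
The hazard-recognizer hooks split as follows: CreateTargetHazardRecognizer serves the pre-RA scheduler and defaults to a no-op recognizer, gated by usePreRAHazardRecognizer() and hence by -disable-sched-hazard, while CreateTargetPostRAHazardRecognizer defaults to the itinerary-driven scoreboard. A sketch of a backend override, following the TargetInstrInfoImpl subclassing pattern of in-tree targets of this era (C++03, so plain virtual; the Foo* names are placeholders, not from this patch):

#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

class FooInstrInfo : public TargetInstrInfoImpl {
public:
  FooInstrInfo(const TargetInstrDesc *Desc, unsigned NumOpcodes)
    : TargetInstrInfoImpl(Desc, NumOpcodes) {}

  // Pre-RA: run the scoreboard against this target's itineraries, but
  // fall back to the default no-op recognizer when the global flag asks
  // for hazard detection to be disabled.
  virtual ScheduleHazardRecognizer *
  CreateTargetHazardRecognizer(const TargetMachine *TM,
                               const ScheduleDAG *DAG) const {
    if (usePreRAHazardRecognizer()) {
      const InstrItineraryData *II = TM->getInstrItineraryData();
      return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
    }
    return TargetInstrInfoImpl::CreateTargetHazardRecognizer(TM, DAG);
  }

  // Post-RA: same as the default above, spelled out for symmetry.
  virtual ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                     const ScheduleDAG *DAG) const {
    return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
  }
};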
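
Since DisableHazardRecognizer is cl::Hidden, -disable-sched-hazard is accepted by llc but only listed under -help-hidden; targets that route their pre-RA decision through usePreRAHazardRecognizer(), as in the sketch above, pick the flag up automatically.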