Summary:
Rename TargetInstrInfo::getLdStBaseRegImmOfs to
TargetInstrInfo::getMemOpBaseRegImmOfs and implement it for x86. The
implementation handles only a few easy cases for now and will be made
more sophisticated in the future.
This is NFCI: the only user of `getLdStBaseRegImmOfs` (now
`getMemOpBaseRegImmOfs`) is `LoadClusterMutation`, and
`LoadClusterMutation` is disabled for x86.
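
To illustrate the new hook's contract, here is a minimal usage sketch
(hypothetical caller, not part of this patch; MI, TII, and TRI are assumed
to be in scope):

  unsigned BaseReg, Offset;
  if (TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI)) {
    // The target decomposed MI's address into a base register plus an
    // immediate byte offset: MI accesses memory at [BaseReg + Offset].
  }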
Reviewers: reames, ab, MatzeB, atrick
Reviewed By: MatzeB, atrick
Subscribers: llvm-commits
Differential Revision: http://reviews.llvm.org/D10199
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@239741 91177308-0d34-0410-b5e6-96231b3b80d8
return false;
}
- /// Get the base register and byte offset of a load/store instr.
- virtual bool getLdStBaseRegImmOfs(MachineInstr *LdSt,
- unsigned &BaseReg, unsigned &Offset,
- const TargetRegisterInfo *TRI) const {
+ /// Get the base register and byte offset of an instruction that reads/writes
+ /// memory.
+ virtual bool getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const {
return false;
}
SUnit *SU = Loads[Idx];
unsigned BaseReg;
unsigned Offset;
- if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
+ if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
}
if (LoadRecords.size() < 2)
// base registers are identical, and the offset of a lower memory access +
// the width doesn't overlap the offset of a higher memory access,
// then the memory accesses are different.
- if (getLdStBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
- getLdStBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
+ if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
+ getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
if (BaseRegA == BaseRegB) {
int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
}
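
As a worked example of the disjointness reasoning above (illustrative
values, not taken from this patch):

  // Two accesses off the same base register: A at offset 0 with width 8,
  // B at offset 8 with width 8.
  int LowOffset = 0, HighOffset = 8, LowWidth = 8;
  // [0, 8) and [8, 16) do not overlap, so the accesses are disjoint.
  bool Disjoint = LowOffset + LowWidth <= HighOffset; // true
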
bool
-AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
- unsigned &Offset,
- const TargetRegisterInfo *TRI) const {
+AArch64InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const {
switch (LdSt->getOpcode()) {
default:
return false;
};
}
-bool AArch64InstrInfo::getLdStBaseRegImmOfsWidth(
+bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
MachineInstr *LdSt, unsigned &BaseReg, int &Offset, int &Width,
const TargetRegisterInfo *TRI) const {
// Handle only loads/stores with base register followed by immediate offset.
/// Detect opportunities for ldp/stp formation.
///
-/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
+/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
MachineInstr *SecondLdSt,
unsigned NumLoads) const {
return false;
if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
return false;
- // getLdStBaseRegImmOfs guarantees that oper 2 isImm.
+ // getMemOpBaseRegImmOfs guarantees that operand 2 isImm().
unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
// Allow 6 bits of positive range.
if (Ofs1 > 64)
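
For illustration (example instructions assumed, not taken from this patch):
two same-opcode loads such as `ldr x1, [x0, #16]` and `ldr x2, [x0, #24]`
have scaled immediates 2 and 3 (LDRXui scales the offset by 8), both within
the 6-bit positive range, so they are candidates to be merged into a single
`ldp x1, x2, [x0, #16]`.
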
/// Hint that pairing the given load or store is unprofitable.
void suppressLdStPair(MachineInstr *MI) const;
- bool getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
- unsigned &Offset,
- const TargetRegisterInfo *TRI) const override;
+ bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const override;
- bool getLdStBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
- int &Offset, int &Width,
- const TargetRegisterInfo *TRI) const;
+ bool getMemOpBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
+ int &Offset, int &Width,
+ const TargetRegisterInfo *TRI) const;
bool enableClusterLoads() const override { return true; }
continue;
unsigned BaseReg;
unsigned Offset;
- if (TII->getLdStBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
+ if (TII->getMemOpBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
if (PrevBaseReg == BaseReg) {
// If this block can take STPs, skip ahead to the next block.
if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))
}
}
-bool SIInstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt,
- unsigned &BaseReg, unsigned &Offset,
- const TargetRegisterInfo *TRI) const {
+bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const {
unsigned Opc = LdSt->getOpcode();
if (isDS(Opc)) {
const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
unsigned BaseReg0, Offset0;
unsigned BaseReg1, Offset1;
- if (getLdStBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
- getLdStBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
+ if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
+ getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
assert(MIa->hasOneMemOperand() && MIb->hasOneMemOperand() &&
"read2 / write2 not expected here yet");
unsigned Width0 = (*MIa->memoperands_begin())->getSize();
int64_t &Offset1,
int64_t &Offset2) const override;
- bool getLdStBaseRegImmOfs(MachineInstr *LdSt,
- unsigned &BaseReg, unsigned &Offset,
- const TargetRegisterInfo *TRI) const final;
+ bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const final;
bool shouldClusterLoads(MachineInstr *FirstLdSt,
MachineInstr *SecondLdSt,
}
}
+bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const {
+ const MCInstrDesc &Desc = MemOp->getDesc();
+ int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags, MemOp->getOpcode());
+ if (MemRefBegin < 0)
+ return false;
+
+ MemRefBegin += X86II::getOperandBias(Desc);
+
+ BaseReg = MemOp->getOperand(MemRefBegin + X86::AddrBaseReg).getReg();
+ if (MemOp->getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
+ return false;
+
+ if (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
+ X86::NoRegister)
+ return false;
+
+ const MachineOperand &DispMO = MemOp->getOperand(MemRefBegin + X86::AddrDisp);
+
+ // The displacement can be symbolic (e.g. a global address); bail out in
+ // that case, since only immediate displacements are handled.
+ if (!DispMO.isImm())
+ return false;
+
+ Offset = DispMO.getImm();
+
+ // A non-trivial index register was already rejected above, so this is a
+ // plain [BaseReg + Offset] access.
+ return true;
+}
+
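
For context, a sketch of the x86 memory operand layout the new hook relies
on (operand indices are the X86::Addr* enumerators from X86BaseInfo.h):

  // An x86 memory reference is five consecutive operands starting at
  // MemRefBegin:
  //   MemRefBegin + X86::AddrBaseReg    -- base register
  //   MemRefBegin + X86::AddrScaleAmt   -- scale amount (1, 2, 4, or 8)
  //   MemRefBegin + X86::AddrIndexReg   -- index register
  //   MemRefBegin + X86::AddrDisp       -- displacement
  //   MemRefBegin + X86::AddrSegmentReg -- segment register
  // The hook succeeds only for plain [Base + Imm] addressing (scale 1, no
  // index register, immediate displacement); e.g. "movq 8(%rdi), %rax"
  // yields BaseReg = RDI and Offset = 8.
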
static unsigned getStoreRegOpcode(unsigned SrcReg,
const TargetRegisterClass *RC,
bool isStackAligned,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
+
+ bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,