return true;
case AArch64::LDURXi:
return true;
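+ // LDURSWi is the unscaled form of the sign-extending word load.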
+ case AArch64::LDURSWi:
+ return true;
}
}
case AArch64::LDRXui:
case AArch64::LDURXi:
return 8;
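+ // A sign-extending word load reads only 4 bytes of memory, even though
+ // it writes a 64-bit register.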
+ case AArch64::LDRSWui:
+ case AArch64::LDURSWi:
+ return 4;
}
}
case AArch64::LDRXui:
case AArch64::LDURXi:
return AArch64::LDPXi;
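+ // Two adjacent sign-extending word loads combine into LDPSW. A sketch of
+ // the rewrite this enables (illustrative, not from the patch itself):
+ //   ldrsw x0, [x2]
+ //   ldrsw x1, [x2, #4]
+ // becomes:
+ //   ldpsw x0, x1, [x2]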
+ case AArch64::LDRSWui:
+ case AArch64::LDURSWi:
+ return AArch64::LDPSWi;
}
}
return AArch64::LDRWpre;
case AArch64::LDRXui:
return AArch64::LDRXpre;
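+ // The sign-extending word load also has a pre-indexed (writeback) form.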
+ case AArch64::LDRSWui:
+ return AArch64::LDRSWpre;
}
}
return AArch64::LDRWpost;
case AArch64::LDRXui:
return AArch64::LDRXpost;
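+ // Likewise, map the sign-extending word load to its post-indexed form.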
+ case AArch64::LDRSWui:
+ return AArch64::LDRSWpost;
}
}
case AArch64::LDRQui:
case AArch64::LDRXui:
case AArch64::LDRWui:
+ case AArch64::LDRSWui:
// Do the unscaled versions as well.
case AArch64::STURSi:
case AArch64::STURDi:
case AArch64::LDURDi:
case AArch64::LDURQi:
case AArch64::LDURWi:
- case AArch64::LDURXi: {
+ case AArch64::LDURXi:
+ case AArch64::LDURSWi: {
// If this is a volatile or otherwise ordered load/store, don't mess with it.
if (MI->hasOrderedMemoryRef()) {
++MBBI;
break;
}
bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- const TargetMachine &TM = Fn.getTarget();
- TII = static_cast<const AArch64InstrInfo *>(
- TM.getSubtargetImpl()->getInstrInfo());
- TRI = TM.getSubtargetImpl()->getRegisterInfo();
+ const TargetSubtargetInfo &Subtarget = Fn.getSubtarget();
+ TII = static_cast<const AArch64InstrInfo *>(Subtarget.getInstrInfo());
+ TRI = Subtarget.getRegisterInfo();
bool Modified = false;
for (auto &MBB : Fn)