// No point in running this in 64-bit mode, since some arguments are
// passed in-register in all common calling conventions, so the pattern
// we're looking for will never match.
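// (Illustrative sketch, register choices arbitrary: the pass rewrites
//    movl %eax, (%esp)
//    movl %ecx, 4(%esp)
//  into
//    pushl %ecx
//    pushl %eax
//  ahead of the call.)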
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
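// MachineFunction::getSubtarget queries the per-function subtarget
// directly, rather than going through the TargetMachine.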
if (STI.is64Bit())
return false;
return true;
// Don't do this when not optimizing for size.
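// (Pushes usually have a smaller encoding than the equivalent
// mov-to-stack-slot stores, but can be slower on some cores, so the
// transformation only pays off under -Os / -Oz.)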
- AttributeSet FnAttrs = MF.getFunction()->getAttributes();
bool OptForSize =
- FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::OptimizeForSize) ||
- FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
+ MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
+ MF.getFunction()->hasFnAttribute(Attribute::MinSize);
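// Function::hasFnAttribute is shorthand for querying attributes at
// AttributeSet::FunctionIndex, so the behavior is unchanged.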
if (!OptForSize)
return false;
// If PUSHrmm is not slow on this target, try to fold the source of the
// push into the instruction.
- const X86Subtarget &ST = MF.getTarget().getSubtarget<X86Subtarget>();
+ const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
bool SlowPUSHrmm = ST.isAtom() || ST.isSLM();
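// That is, fold e.g. (a sketch, registers arbitrary):
//   movl (%ecx), %eax
//   pushl %eax
// into:
//   pushl (%ecx)
// PUSHrmm is slow on Atom/Silvermont, so the fold is skipped there.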
// Check that this is legal to fold. Right now, we're extremely
// conservative about that.
if (DefMI->getOpcode() != X86::MOV32rm ||
    DefMI->getParent() != FrameSetup->getParent())
return nullptr;
- // Be careful with movs that load from a stack slot, since it may get
- // resolved incorrectly.
- // TODO: Again, we already have the infrastructure, so this should work.
- if (!DefMI->getOperand(1).isReg())
- return nullptr;
-
// Now, make sure everything else up until the ADJCALLSTACK is a sequence
// of MOVs. To be less conservative would require duplicating a lot of the
// logic from PeepholeOptimizer.
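// A sketch of what that check can look like, assuming DefMI precedes
// FrameSetup within the same basic block:
//   for (auto I = DefMI; I != FrameSetup; ++I)
//     if (I->getOpcode() != X86::MOV32rm)
//       return nullptr;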