addAttribute(n, Attribute::ReadOnly);
}
+ /// Returns true if this function should be optimized for minimum size (-Oz).
+ bool optForMinSize() const {
+ return hasFnAttribute(Attribute::MinSize);
+ }
+
+ /// Returns true if this function should be optimized for size (-Os) or
+ /// minimum size (-Oz).
+ bool optForSize() const {
+ return hasFnAttribute(Attribute::OptimizeForSize) || optForMinSize();
+ }
+
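// A minimal usage sketch, assuming `F` is a `const Function *`: the two
// helpers subsume the raw attribute checks that the hunks below replace.
//
//   bool ForCodeSize = F->optForSize();     // true at -Os and -Oz
//   bool ForMinSize  = F->optForMinSize();  // true only at -Oz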
/// copyAttributesFrom - copy all additional attributes (those not needed to
/// create a Function) from the Function Src to this one.
void copyAttributesFrom(const GlobalValue *Src) override;
// instructions that would be deleted in the merge.
MachineFunction *MF = MBB1->getParent();
if (EffectiveTailLen >= 2 &&
+ // FIXME: Use Function::optForSize().
MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
(I1 == MBB1->begin() || I2 == MBB2->begin()))
return true;
TLI = TM->getSubtargetImpl(F)->getTargetLowering();
TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+ // FIXME: Use Function::optForSize().
OptSize = F.hasFnAttribute(Attribute::OptimizeForSize);
/// This optimization identifies DIV instructions that can be
// exclusively on the loop info here so that we can align backedges in
// unnatural CFGs and backedges that were introduced purely because of the
// loop rotations done during this layout pass.
+ // FIXME: Use Function::optForSize().
if (F.getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
return;
if (FunctionChain.begin() == FunctionChain.end())
Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = 0;
+ // FIXME: Use Function::optForSize().
OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
// FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
+ // FIXME: Use Function::optForSize().
if (!Fn->hasFnAttribute(Attribute::OptimizeForSize))
Alignment = std::max(Alignment,
STI->getTargetLowering()->getPrefFunctionAlignment());
DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
: DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {
- auto *F = DAG.getMachineFunction().getFunction();
- ForCodeSize = F->hasFnAttribute(Attribute::OptimizeForSize) ||
- F->hasFnAttribute(Attribute::MinSize);
+ ForCodeSize = DAG.getMachineFunction().getFunction()->optForSize();
}
/// Runs the dag combiner on all nodes in the work list
}
static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
- const Function *F = MF.getFunction();
- bool HasMinSize = F->hasFnAttribute(Attribute::MinSize);
- bool HasOptSize = F->hasFnAttribute(Attribute::OptimizeForSize);
-
// On Darwin, -Os means optimize for size without hurting performance, so
// only really optimize for size when -Oz (MinSize) is used.
if (MF.getTarget().getTargetTriple().isOSDarwin())
- return HasMinSize;
- return HasOptSize || HasMinSize;
+ return MF.getFunction()->optForMinSize();
+ return MF.getFunction()->optForSize();
}
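// Resulting behavior as a sketch (function attributes -> lower for size),
// assuming the usual clang mapping of -Os to `optsize` and -Oz to `minsize`:
//
//   attributes     Darwin   elsewhere
//   (none)         false    false
//   optsize        false    true
//   minsize        true     true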
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
return DAG.getConstantFP(1.0, DL, LHS.getValueType());
const Function *F = DAG.getMachineFunction().getFunction();
+ // FIXME: Use Function::optForSize().
if (!F->hasFnAttribute(Attribute::OptimizeForSize) ||
// If optimizing for size, don't insert too many multiplies. This
// inserts up to 5 multiplies.
// compensate for the duplication.
unsigned MaxDuplicateCount;
if (TailDuplicateSize.getNumOccurrences() == 0 &&
+ // FIXME: Use Function::optForSize().
MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
MaxDuplicateCount = 1;
else
Loops = getAnalysisIfAvailable<MachineLoopInfo>();
Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = nullptr;
- MinSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
+ MinSize = MF.getFunction()->optForMinSize();
bool Changed = false;
CmpConv.runOnMachineFunction(MF);
}
bool runOnMachineFunction(MachineFunction &MF) override {
- ForCodeSize =
- MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
- MF.getFunction()->hasFnAttribute(Attribute::MinSize);
+ ForCodeSize = MF.getFunction()->optForSize();
Subtarget = &MF.getSubtarget<AArch64Subtarget>();
return SelectionDAGISel::runOnMachineFunction(MF);
}
if (!Subtarget->isCyclone())
return SDValue();
- // Don't split at Oz.
- MachineFunction &MF = DAG.getMachineFunction();
- bool IsMinSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
- if (IsMinSize)
+ // Don't split at -Oz.
+ if (DAG.getMachineFunction().getFunction()->optForMinSize())
return SDValue();
SDValue StVal = S->getValue();
// If we are optimizing for size, see if the branch in the predecessor can be
// lowered to cbn?z by the constant island lowering pass, and return false if
// so. This results in a shorter instruction sequence.
- const Function *F = MBB.getParent()->getFunction();
- if (F->hasFnAttribute(Attribute::OptimizeForSize) ||
- F->hasFnAttribute(Attribute::MinSize)) {
+ if (MBB.getParent()->getFunction()->optForSize()) {
MachineBasicBlock *Pred = *MBB.pred_begin();
if (!Pred->empty()) {
MachineInstr *LastMI = &*Pred->rbegin();
unsigned NumBytes) {
// This optimisation potentially adds lots of load and store
// micro-operations; it's only really a great benefit to code size.
- if (!MF.getFunction()->hasFnAttribute(Attribute::MinSize))
+ if (!MF.getFunction()->optForMinSize())
return false;
// If only one register is pushed/popped, LLVM can use an LDR/STR
// instructions).
if (Latency > 0 && Subtarget.isThumb2()) {
const MachineFunction *MF = DefMI->getParent()->getParent();
+ // FIXME: Use Function::optForSize().
if (MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
--Latency;
}
// FIXME: handle tail calls differently.
unsigned CallOpc;
- bool HasMinSizeAttr = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
if (Subtarget->isThumb()) {
if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
CallOpc = ARMISD::CALL_NOLINK;
if (!isDirect && !Subtarget->hasV5TOps())
CallOpc = ARMISD::CALL_NOLINK;
else if (doesNotRet && isDirect && Subtarget->hasRAS() &&
- // Emit regular call when code size is the priority
- !HasMinSizeAttr)
+ // Emit regular call when code size is the priority
+ !MF.getFunction()->optForMinSize())
// "mov lr, pc; b _foo" to avoid confusing the RSP
CallOpc = ARMISD::CALL_NOLINK;
else
// immediates as it is inherently position independent, and may be out of
// range otherwise.
return !NoMovt && hasV6T2Ops() &&
- (isTargetWindows() ||
- !MF.getFunction()->hasFnAttribute(Attribute::MinSize));
+ (isTargetWindows() || !MF.getFunction()->optForMinSize());
}
bool ARMSubtarget::useFastISel() const {
if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
return false;
- if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
- STI->avoidMOVsShifterOperand())
+ if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
// Don't issue movs with shifter operand for some CPUs unless we
- // are optimizing / minimizing for size.
+ // are optimizing for size.
return false;
unsigned Reg0 = MI->getOperand(0).getReg();
if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
return false;
- if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
- STI->avoidMOVsShifterOperand())
+ if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
// Don't issue movs with shifter operand for some CPUs unless we
- // are optimizing / minimizing for size.
+ // are optimizing for size.
return false;
unsigned Limit = ~0U;
TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());
- // Optimizing / minimizing size?
- OptimizeSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
- MinimizeSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
+ // Optimizing / minimizing size? Minimizing size implies optimizing for size.
+ OptimizeSize = MF.getFunction()->optForSize();
+ MinimizeSize = MF.getFunction()->optForMinSize();
BlockInfo.clear();
BlockInfo.resize(MF.getNumBlockIDs());
}
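// With the helpers, `OptimizeSize` above is now true whenever `MinimizeSize`
// is, which is why the earlier guards drop the separate `!MinimizeSize`
// test; as a minimal sketch, the invariant the guards now rely on is:
//
//   assert(!MinimizeSize || OptimizeSize);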
+// FIXME: Use Function::optForSize().
inline static bool isOptSize(const MachineFunction &MF) {
AttributeSet AF = MF.getFunction()->getAttributes();
return AF.hasAttribute(AttributeSet::FunctionIndex,
                       Attribute::OptimizeForSize);
}
inline static bool isMinSize(const MachineFunction &MF) {
- AttributeSet AF = MF.getFunction()->getAttributes();
- return AF.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
+ return MF.getFunction()->optForMinSize();
}
return true;
// Don't do this when not optimizing for size.
- bool OptForSize =
- MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
- MF.getFunction()->hasFnAttribute(Attribute::MinSize);
-
- if (!OptForSize)
+ if (!MF.getFunction()->optForSize())
return false;
unsigned StackAlign = TFL->getStackAlignment();
void X86DAGToDAGISel::PreprocessISelDAG() {
// OptForSize is used in pattern predicates that isel is matching.
+ // FIXME: Use Function::optForSize().
OptForSize = MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
// it may be detrimental to overall size. There needs to be a way to detect
// that condition to know if this is truly a size win.
const Function *F = DAG.getMachineFunction().getFunction();
+ // FIXME: Use Function::optForSize().
bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
// Handle broadcasting a single constant scalar from the constant pool
// Bits [3:0] of the constant are the zero mask. The DAG Combiner may
// combine either bitwise AND or insert of float 0.0 to set these bits.
- const Function *F = DAG.getMachineFunction().getFunction();
- bool MinSize = F->hasFnAttribute(Attribute::MinSize);
+ bool MinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
// If this is an insertion of 32-bits into the low 32-bits of
// a vector, we prefer to generate a blend with immediate rather
// if we're optimizing for size, however, as that'll allow better folding
// of memory operations.
if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
- !DAG.getMachineFunction().getFunction()->hasFnAttribute(
- Attribute::MinSize) &&
+ !DAG.getMachineFunction().getFunction()->optForMinSize() &&
!Subtarget->isAtom()) {
unsigned ExtendOp =
isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
// fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
MachineFunction &MF = DAG.getMachineFunction();
+ // FIXME: Use Function::optForSize().
bool OptForSize =
MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
// For CPUs that favor the register form of a call or push,
// do not fold loads into calls or pushes, unless optimizing for size
// aggressively.
- if (isCallRegIndirect &&
- !MF.getFunction()->hasFnAttribute(Attribute::MinSize) &&
+ if (isCallRegIndirect && !MF.getFunction()->optForMinSize() &&
(MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r ||
MI->getOpcode() == X86::PUSH16r || MI->getOpcode() == X86::PUSH32r ||
MI->getOpcode() == X86::PUSH64r))
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
+ // FIXME: Use Function::optForSize().
if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode()))
return nullptr;
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
+ // FIXME: Use Function::optForSize().
if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode()))
return nullptr;
/// runOnMachineFunction - Loop over all of the basic blocks, inserting
/// NOOP instructions before early exits.
bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
- if (MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
- MF.getFunction()->hasFnAttribute(Attribute::MinSize)) {
+ if (MF.getFunction()->optForSize()) {
return false;
}
// would decrease the threshold.
Function *Caller = CS.getCaller();
bool OptSize = Caller && !Caller->isDeclaration() &&
+ // FIXME: Use Function::optForSize().
Caller->hasFnAttribute(Attribute::OptimizeForSize);
if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
OptSizeThreshold < thres)
: UP.DynamicCostSavingsDiscount;
if (!UserThreshold &&
+ // FIXME: Use Function::optForSize().
L->getHeader()->getParent()->hasFnAttribute(
Attribute::OptimizeForSize)) {
Threshold = UP.OptSizeThreshold;
}
// Do not do non-trivial unswitch while optimizing for size.
+ // FIXME: Use Function::optForSize().
if (OptimizeForSize || F->hasFnAttribute(Attribute::OptimizeForSize))
return false;
// Check the function attributes to find out if this function should be
// optimized for size.
bool OptForSize = Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
+ // FIXME: Use Function::optForSize().
F->hasFnAttribute(Attribute::OptimizeForSize);
// Compute the weighted frequency of this loop being executed and see if it