From: Duncan P. N. Exon Smith
Date: Sat, 14 Feb 2015 00:12:15 +0000 (+0000)
Subject: Analysis: Canonicalize access to function attributes, NFC
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=efd49ea0d2f0a2ee8537d87cdbe12b24727d6f95;p=oota-llvm.git

Analysis: Canonicalize access to function attributes, NFC

Canonicalize access to function attributes to use the simpler API.

getAttributes().getAttribute(AttributeSet::FunctionIndex, Kind)
  => getFnAttribute(Kind)

getAttributes().hasAttribute(AttributeSet::FunctionIndex, Kind)
  => hasFnAttribute(Kind)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@229192 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Analysis/IPA/InlineCost.cpp b/lib/Analysis/IPA/InlineCost.cpp
index 775a9fb1f61..cd494ba84c9 100644
--- a/lib/Analysis/IPA/InlineCost.cpp
+++ b/lib/Analysis/IPA/InlineCost.cpp
@@ -719,8 +719,7 @@ bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) {

 bool CallAnalyzer::visitCallSite(CallSite CS) {
   if (CS.hasFnAttr(Attribute::ReturnsTwice) &&
-      !F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
-                                      Attribute::ReturnsTwice)) {
+      !F.hasFnAttribute(Attribute::ReturnsTwice)) {
     // This aborts the entire analysis.
     ExposesReturnsTwice = true;
     return false;
@@ -1350,9 +1349,7 @@ InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
 }

 bool InlineCostAnalysis::isInlineViable(Function &F) {
-  bool ReturnsTwice =
-    F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
-                                   Attribute::ReturnsTwice);
+  bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
   for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
     // Disallow inlining of functions which contain indirect branches or
     // blockaddresses.
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index fa67aeb1bce..f0e70248739 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -298,8 +298,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,

   // Load widening is hostile to ThreadSanitizer: it may cause false positives
   // or make the reports more cryptic (access sizes are wrong).
-  if (LI->getParent()->getParent()->getAttributes().
-        hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeThread))
+  if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
     return 0;

   // Get the base of this load.
@@ -344,9 +343,9 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
         !DL.fitsInLegalInteger(NewLoadByteSize*8))
       return 0;

-    if (LIOffs+NewLoadByteSize > MemLocEnd &&
-        LI->getParent()->getParent()->getAttributes().
-          hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeAddress))
+    if (LIOffs + NewLoadByteSize > MemLocEnd &&
+        LI->getParent()->getParent()->hasFnAttribute(
+            Attribute::SanitizeAddress))
       // We will be reading past the location accessed by the original program.
      // While this is safe in a regular build, Address Safety analysis tools
      // may start reporting false warnings. So, don't do widening.
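
For reference, a minimal standalone sketch of the canonicalized form, using only the public llvm::Function accessors this change relies on; the helper name isSanitizedFunction is hypothetical and is not part of the commit:

    // Illustrative sketch mirroring the checks in MemoryDependenceAnalysis.cpp,
    // written against the simpler Function accessors used after this commit.
    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/Function.h"

    // Hypothetical helper: true when F is instrumented by ThreadSanitizer or
    // AddressSanitizer, in which case load widening should be avoided.
    static bool isSanitizedFunction(const llvm::Function &F) {
      // Previously spelled as:
      //   F.getAttributes().hasAttribute(AttributeSet::FunctionIndex, Kind)
      return F.hasFnAttribute(llvm::Attribute::SanitizeThread) ||
             F.hasFnAttribute(llvm::Attribute::SanitizeAddress);
    }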