#define DEBUG_TYPE "codegenprepare"
#include "llvm/Transforms/Scalar.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/IRBuilder.h"
-#include "llvm/InlineAsm.h"
-#include "llvm/Instructions.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Pass.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/DominatorInternals.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Assembly/Writer.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Pass.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
+#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace llvm::PatternMatch;
}
bool runOnFunction(Function &F);
+ const char *getPassName() const { return "CodeGen Prepare"; }
+
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<DominatorTree>();
AU.addPreserved<ProfileInfo>();
bool MoveExtToFormExtLoad(Instruction *I);
bool OptimizeExtUses(Instruction *I);
bool OptimizeSelectInst(SelectInst *SI);
- bool DupRetToEnableTailCallOpts(ReturnInst *RI);
+ bool DupRetToEnableTailCallOpts(BasicBlock *BB);
bool PlaceDbgValues(Function &F);
};
}
TLInfo = &getAnalysis<TargetLibraryInfo>();
DT = getAnalysisIfAvailable<DominatorTree>();
PFI = getAnalysisIfAvailable<ProfileInfo>();
- OptSize = F.hasFnAttr(Attribute::OptimizeForSize);
+ OptSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::OptimizeForSize);
+
+ /// This optimization identifies DIV instructions that can be
+ /// profitably bypassed and carried out with a shorter, faster divide.
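+ /// For example, given a 64-bit to 32-bit bypass width, a 64-bit divide is
+ /// guarded by a run-time check and performed with a 32-bit divide whenever
+ /// both operands fit in 32 bits.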
+ if (TLI && TLI->isSlowDivBypassed()) {
+ const DenseMap<unsigned int, unsigned int> &BypassWidths =
+ TLI->getBypassSlowDivWidths();
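+ // BypassWidths maps a slow division bit width to the narrower width the
+ // target divides quickly; scan every block and rewrite eligible divides.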
+ for (Function::iterator I = F.begin(); I != F.end(); I++)
+ EverMadeChange |= bypassSlowDivision(F, I, BypassWidths);
+ }
- // First pass, eliminate blocks that contain only PHI nodes and an
+ // Eliminate blocks that contain only PHI nodes and an
// unconditional branch.
EverMadeChange |= EliminateMostlyEmptyBlocks(F);
bool MadeChange = true;
while (MadeChange) {
MadeChange = false;
- for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
+ for (Function::iterator I = F.begin(); I != F.end(); ) {
BasicBlock *BB = I++;
MadeChange |= OptimizeBlock(*BB);
}
WorkList.insert(*II);
}
- for (SmallPtrSet<BasicBlock*, 8>::iterator
- I = WorkList.begin(), E = WorkList.end(); I != E; ++I)
- DeleteDeadBlock(*I);
+ // Delete the dead blocks and any of their dead successors.
+ MadeChange |= !WorkList.empty();
+ while (!WorkList.empty()) {
+ BasicBlock *BB = *WorkList.begin();
+ WorkList.erase(BB);
+ SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
+
+ DeleteDeadBlock(BB);
+
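+ // Deleting BB may leave some of its successors without predecessors;
+ // those blocks are dead as well, so queue them for deletion too.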
+ for (SmallVectorImpl<BasicBlock*>::iterator
+ II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
+ if (pred_begin(*II) == pred_end(*II))
+ WorkList.insert(*II);
+ }
// Merge pairs of basic blocks with unconditional branches, connected by
// a single edge.
// edge, just collapse it.
BasicBlock *SinglePred = BB->getSinglePredecessor();
- if (!SinglePred || SinglePred == BB) continue;
+ // Don't merge if BB's address is taken; any blockaddress users still need BB to exist.
+ if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;
BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
if (Term && !Term->isConditional()) {
// happens.
WeakVH IterHandle(CurInstIterator);
- replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getTargetData() : 0,
+ replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getDataLayout() : 0,
TLInfo, ModifiedDT ? 0 : DT);
// If the iterator instruction was recursively deleted, start over at the
// From here on out we're working with named functions.
if (CI->getCalledFunction() == 0) return false;
- // We'll need TargetData from here on out.
- const TargetData *TD = TLI ? TLI->getTargetData() : 0;
+ // We'll need DataLayout from here on out.
+ const DataLayout *TD = TLI ? TLI->getDataLayout() : 0;
if (!TD) return false;
// Lower all default uses of _chk calls. This is very similar
/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
/// instructions to the predecessor to enable tail call optimizations. The
/// case it is currently looking for is:
+/// @code
/// bb0:
/// %tmp0 = tail call i32 @f0()
/// br label %return
/// return:
/// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
/// ret i32 %retval
+/// @endcode
///
/// =>
///
+/// @code
/// bb0:
/// %tmp0 = tail call i32 @f0()
/// ret i32 %tmp0
/// bb2:
/// %tmp2 = tail call i32 @f2()
/// ret i32 %tmp2
-///
-bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
+/// @endcode
+bool CodeGenPrepare::DupRetToEnableTailCallOpts(BasicBlock *BB) {
if (!TLI)
return false;
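+ // Only a block that ends in a plain return is a candidate for duplication.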
+ ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
+ if (!RI)
+ return false;
+
PHINode *PN = 0;
BitCastInst *BCI = 0;
Value *V = RI->getReturnValue();
return false;
}
- BasicBlock *BB = RI->getParent();
if (PN && PN->getParent() != BB)
return false;
// It's not safe to eliminate the sign / zero extension of the return value.
// See llvm::isInTailCallPosition().
const Function *F = BB->getParent();
- Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
- if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
+ Attribute CallerRetAttr = F->getAttributes().getRetAttributes();
+ if (CallerRetAttr.hasAttribute(Attribute::ZExt) ||
+ CallerRetAttr.hasAttribute(Attribute::SExt))
return false;
// Make sure there are no instructions between the PHI and return, or that the
// Conservatively require the attributes of the call to match those of the
// return. Ignore noalias because it doesn't affect the call sequence.
- Attributes CalleeRetAttr = CS.getAttributes().getRetAttributes();
- if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
+ Attribute CalleeRetAttr = CS.getAttributes().getRetAttributes();
+ if (AttrBuilder(CalleeRetAttr).
+ removeAttribute(Attribute::NoAlias) !=
+ AttrBuilder(CallerRetAttr).
+ removeAttribute(Attribute::NoAlias))
continue;
// Make sure the call instruction is followed by an unconditional branch to
}
// If we eliminated all predecessors of the block, delete the block now.
- if (Changed && pred_begin(BB) == pred_end(BB))
+ if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
BB->eraseFromParent();
return Changed;
DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
<< *MemoryInst);
Type *IntPtrTy =
- TLI->getTargetData()->getIntPtrType(AccessTy->getContext());
+ TLI->getDataLayout()->getIntPtrType(AccessTy->getContext());
Value *Result = 0;
}
+/// If we have a SelectInst that will likely profit from branch prediction,
+/// turn it into a branch.
bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) {
- // If we have a SelectInst that will likely profit from branch prediction,
- // turn it into a branch.
- if (DisableSelectToBranch || OptSize || !TLI ||
- !TLI->isPredictableSelectExpensive())
- return false;
+ bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
- if (!SI->getCondition()->getType()->isIntegerTy(1) ||
- !isFormingBranchFromSelectProfitable(SI))
+ // Can we convert the 'select' into a branch (control flow) at all?
+ if (DisableSelectToBranch || OptSize || !TLI || VectorCond)
return false;
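+ // Classify the select so we can ask the target whether it lowers this form
+ // efficiently.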
+ TargetLowering::SelectSupportKind SelectKind;
+ if (VectorCond)
+ SelectKind = TargetLowering::VectorMaskSelect;
+ else if (SI->getType()->isVectorTy())
+ SelectKind = TargetLowering::ScalarCondVectorVal;
+ else
+ SelectKind = TargetLowering::ScalarValSelect;
+
+ // Does the target have efficient codegen support for this kind of select?
+ if (TLI->isSelectSupported(SelectKind)) {
+ // The target lowers this select efficiently. Keep it unless selects are
+ // expensive for predictable conditions and a branch looks profitable here.
+ if (!TLI->isPredictableSelectExpensive() ||
+ !isFormingBranchFromSelectProfitable(SI))
+ return false;
+ }
+
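+ // At this point a branch is expected to beat the select, either because the
+ // target cannot lower this select form or because branching is profitable.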
ModifiedDT = true;
// First, we split the block containing the select into 2 blocks.
if (CallInst *CI = dyn_cast<CallInst>(I))
return OptimizeCallInst(CI);
- if (ReturnInst *RI = dyn_cast<ReturnInst>(I))
- return DupRetToEnableTailCallOpts(RI);
-
if (SelectInst *SI = dyn_cast<SelectInst>(I))
return OptimizeSelectInst(SI);
bool MadeChange = false;
CurInstIterator = BB.begin();
- for (BasicBlock::iterator E = BB.end(); CurInstIterator != E; )
+ while (CurInstIterator != BB.end())
MadeChange |= OptimizeInst(CurInstIterator++);
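+ // Return duplication inspects the block terminator rather than a single
+ // instruction, so run it once after the per-instruction pass.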
+ MadeChange |= DupRetToEnableTailCallOpts(&BB);
+
return MadeChange;
}