#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <vector>
using namespace llvm;
}
Expression ValueTable::create_extractvalue_expression(ExtractValueInst *EI) {
- assert(EI != 0 && "Not an ExtractValueInst?");
+ assert(EI && "Not an ExtractValueInst?");
Expression e;
e.type = EI->getType();
e.opcode = 0;
// If this is a clobber and L is the first instruction in its block, then
// we have the first instruction in the entry block.
if (DepLI != LI && Address && DL) {
- int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(),
- LI->getPointerOperand(),
+ int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(), Address,
DepLI, *DL);
if (Offset != -1) {
continue;
}
+ // Loading from calloc (which zero initializes memory) -> zero
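+ // For example (illustrative IR, not taken from this patch's tests):
+ //   %p = call i8* @calloc(i64 1, i64 4)
+ //   %q = bitcast i8* %p to i32*
+ //   %v = load i32* %q      ; known to be 0, since calloc zero-fills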
+ if (isCallocLikeFn(DepInst, TLI)) {
+ ValuesPerBlock.push_back(AvailableValueInBlock::get(
+ DepBB, Constant::getNullValue(LI->getType())));
+ continue;
+ }
+
if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
// Reject loads and stores that are to the same address but are of
// different types if we have to.
// Check to see how many predecessors have the loaded value fully
// available.
- DenseMap<BasicBlock*, Value*> PredLoads;
+ MapVector<BasicBlock *, Value *> PredLoads;
DenseMap<BasicBlock*, char> FullyAvailableBlocks;
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
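+ // Blocks that already provide the value start out marked fully available;
+ // IsValueFullyAvailableInBlock uses the remaining char values to track its
+ // own speculation state while it recurses over predecessors.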
if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) {
continue;
}
- PredLoads[Pred] = nullptr;
if (Pred->getTerminator()->getNumSuccessors() != 1) {
if (isa<IndirectBrInst>(Pred->getTerminator())) {
}
CriticalEdgePred.push_back(Pred);
+ } else {
+ // Only add the predecessors that will not be split for now.
+ PredLoads[Pred] = nullptr;
}
}
// Decide whether PRE is profitable for this load.
- unsigned NumUnavailablePreds = PredLoads.size();
+ unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
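+ // Predecessors behind a critical edge are not in PredLoads yet (they are
+ // only added once the edge has been split), so count them separately here.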
assert(NumUnavailablePreds != 0 &&
"Fully available value should already be eliminated!");
return false;
// Split critical edges, and update the unavailable predecessors accordingly.
- for (SmallVectorImpl<BasicBlock *>::iterator I = CriticalEdgePred.begin(),
- E = CriticalEdgePred.end(); I != E; I++) {
- BasicBlock *OrigPred = *I;
+ for (BasicBlock *OrigPred : CriticalEdgePred) {
BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
- PredLoads.erase(OrigPred);
+ assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
PredLoads[NewPred] = nullptr;
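+ // NewPred is the block created on the split edge: it has LoadBB as its
+ // single successor, so a load emitted there cannot execute on paths that
+ // bypass LoadBB.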
DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
<< LoadBB->getName() << '\n');
// Check if the load can safely be moved to all the unavailable predecessors.
bool CanDoPRE = true;
SmallVector<Instruction*, 8> NewInsts;
- for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
- E = PredLoads.end(); I != E; ++I) {
- BasicBlock *UnavailablePred = I->first;
+ for (auto &PredLoad : PredLoads) {
+ BasicBlock *UnavailablePred = PredLoad.first;
// Do PHI translation to get its value in the predecessor if necessary. The
// returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
break;
}
- I->second = LoadPtr;
+ PredLoad.second = LoadPtr;
}
if (!CanDoPRE) {
if (MD) MD->removeInstruction(I);
I->eraseFromParent();
}
- // HINT:Don't revert the edge-splitting as following transformation may
- // also need to split these critial edges.
+ // HINT: Don't revert the edge-splitting as following transformation may
+ // also need to split these critical edges.
return !CriticalEdgePred.empty();
}
VN.lookup_or_add(NewInsts[i]);
}
- for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
- E = PredLoads.end(); I != E; ++I) {
- BasicBlock *UnavailablePred = I->first;
- Value *LoadPtr = I->second;
+ for (const auto &PredLoad : PredLoads) {
+ BasicBlock *UnavailablePred = PredLoad.first;
+ Value *LoadPtr = PredLoad.second;
Instruction *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
LI->getAlignment(),
UnavailablePred->getTerminator());
- // Transfer the old load's TBAA tag to the new load.
- if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa))
- NewLoad->setMetadata(LLVMContext::MD_tbaa, Tag);
+ // Transfer the old load's AA tags to the new load.
+ AAMDNodes Tags;
+ LI->getAAMetadata(Tags);
+ if (Tags)
+ NewLoad->setAAMetadata(Tags);
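+ // AAMDNodes bundles the !tbaa, !alias.scope and !noalias nodes together,
+ // so the new load keeps the same aliasing information as the original.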
// Transfer DebugLoc.
NewLoad->setDebugLoc(LI->getDebugLoc());
ReplOp->setHasNoUnsignedWrap(false);
}
if (Instruction *ReplInst = dyn_cast<Instruction>(Repl)) {
- SmallVector<std::pair<unsigned, MDNode*>, 4> Metadata;
- ReplInst->getAllMetadataOtherThanDebugLoc(Metadata);
- for (int i = 0, n = Metadata.size(); i < n; ++i) {
- unsigned Kind = Metadata[i].first;
- MDNode *IMD = I->getMetadata(Kind);
- MDNode *ReplMD = Metadata[i].second;
- switch(Kind) {
- default:
- ReplInst->setMetadata(Kind, nullptr); // Remove unknown metadata
- break;
- case LLVMContext::MD_dbg:
- llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
- case LLVMContext::MD_tbaa:
- ReplInst->setMetadata(Kind, MDNode::getMostGenericTBAA(IMD, ReplMD));
- break;
- case LLVMContext::MD_range:
- ReplInst->setMetadata(Kind, MDNode::getMostGenericRange(IMD, ReplMD));
- break;
- case LLVMContext::MD_prof:
- llvm_unreachable("MD_prof in a non-terminator instruction");
- break;
- case LLVMContext::MD_fpmath:
- ReplInst->setMetadata(Kind, MDNode::getMostGenericFPMath(IMD, ReplMD));
- break;
- }
- }
+ // FIXME: If both the original and replacement value are part of the
+ // same control-flow region (meaning that the execution of one
+ // guarantees the execution of the other), then we can combine the
+ // noalias scopes here and do better than the general conservative
+ // answer used in combineMetadata().
+
+ // In general, GVN unifies expressions over different control-flow
+ // regions, and so we need a conservative combination of the noalias
+ // scopes.
+ unsigned KnownIDs[] = {
+ LLVMContext::MD_tbaa,
+ LLVMContext::MD_alias_scope,
+ LLVMContext::MD_noalias,
+ LLVMContext::MD_range,
+ LLVMContext::MD_fpmath,
+ LLVMContext::MD_invariant_load,
+ };
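+ // combineMetadata() merges each of the listed kinds conservatively (for
+ // example, most-generic TBAA and range info) and removes any metadata
+ // kind on ReplInst that is not listed above.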
+ combineMetadata(ReplInst, I, KnownIDs);
}
}
}
}
+ // If this load follows a calloc (which zero initializes memory),
+ // then the loaded value is zero
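+ // (This is the single-dependency counterpart of the non-local calloc
+ // case handled above.)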
+ if (isCallocLikeFn(DepInst, TLI)) {
+ L->replaceAllUsesWith(Constant::getNullValue(L->getType()));
+ markInstructionForDeletion(L);
+ ++NumGVNLoad;
+ return true;
+ }
+
return false;
}
return true;
}
-// performPRE() will trigger assert if it come across an instruciton without
+// performPRE() will trigger assert if it comes across an instruction without
// associated val-num. As it normally has far more live instructions than dead
// instructions, it makes more sense just to "fabricate" a val-number for the
// dead code than to check whether the instruction involved is dead or not.