//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "memdep"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Function.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/ADT/Statistic.h"
-
-#define DEBUG_TYPE "memdep"
-
using namespace llvm;
-namespace {
- // Control the calculation of non-local dependencies by only examining the
- // predecessors if the basic block has less than X amount (50 by default).
- cl::opt<int>
- PredLimit("nonlocaldep-threshold", cl::Hidden, cl::init(50),
- cl::desc("Control the calculation of non-local"
- "dependencies (default = 50)"));
-}
-
-STATISTIC(NumCacheNonlocal, "Number of cached non-local responses");
-STATISTIC(NumUncacheNonlocal, "Number of uncached non-local responses");
-
+STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
+STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
+STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");
char MemoryDependenceAnalysis::ID = 0;
-Instruction* const MemoryDependenceAnalysis::NonLocal = (Instruction*)-3;
-Instruction* const MemoryDependenceAnalysis::None = (Instruction*)-4;
-Instruction* const MemoryDependenceAnalysis::Dirty = (Instruction*)-5;
-
// Register this pass...
static RegisterPass<MemoryDependenceAnalysis> X("memdep",
- "Memory Dependence Analysis", false, true);
-
-void MemoryDependenceAnalysis::ping(Instruction *D) {
- for (depMapType::iterator I = depGraphLocal.begin(), E = depGraphLocal.end();
- I != E; ++I) {
- assert(I->first != D);
- assert(I->second.first != D);
- }
-
- for (nonLocalDepMapType::iterator I = depGraphNonLocal.begin(), E = depGraphNonLocal.end();
- I != E; ++I) {
- assert(I->first != D);
- }
-
- for (reverseDepMapType::iterator I = reverseDep.begin(), E = reverseDep.end();
- I != E; ++I)
- for (SmallPtrSet<Instruction*, 4>::iterator II = I->second.begin(), EE = I->second.end();
- II != EE; ++II)
- assert(*II != D);
-
- for (reverseDepMapType::iterator I = reverseDepNonLocal.begin(), E = reverseDepNonLocal.end();
- I != E; ++I)
- for (SmallPtrSet<Instruction*, 4>::iterator II = I->second.begin(), EE = I->second.end();
- II != EE; ++II)
- assert(*II != D);
-}
+ "Memory Dependence Analysis", false, true);
/// getAnalysisUsage - Does not modify anything. It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AliasAnalysis>();
AU.addRequiredTransitive<TargetData>();
}
-/// getCallSiteDependency - Private helper for finding the local dependencies
-/// of a call site.
-Instruction* MemoryDependenceAnalysis::getCallSiteDependency(CallSite C,
- Instruction* start,
- BasicBlock* block) {
-
- std::pair<Instruction*, bool>& cachedResult =
- depGraphLocal[C.getInstruction()];
- AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
- TargetData& TD = getAnalysis<TargetData>();
- BasicBlock::iterator blockBegin = C.getInstruction()->getParent()->begin();
- BasicBlock::iterator QI = C.getInstruction();
-
-  // If the starting point was specified, use it
- if (start) {
- QI = start;
- blockBegin = start->getParent()->begin();
- // If the starting point wasn't specified, but the block was, use it
- } else if (!start && block) {
- QI = block->end();
- blockBegin = block->begin();
- }
-
+bool MemoryDependenceAnalysis::runOnFunction(Function &) {
+ AA = &getAnalysis<AliasAnalysis>();
+ TD = &getAnalysis<TargetData>();
+ return false;
+}
+
+
+/// getCallSiteDependencyFrom - Private helper for finding the local
+/// dependencies of a call site.
+MemDepResult MemoryDependenceAnalysis::
+getCallSiteDependencyFrom(CallSite CS, BasicBlock::iterator ScanIt,
+ BasicBlock *BB) {
// Walk backwards through the block, looking for dependencies
- while (QI != blockBegin) {
- --QI;
+ while (ScanIt != BB->begin()) {
+ Instruction *Inst = --ScanIt;
// If this inst is a memory op, get the pointer it accessed
- Value* pointer = 0;
- uint64_t pointerSize = 0;
- if (StoreInst* S = dyn_cast<StoreInst>(QI)) {
- pointer = S->getPointerOperand();
- pointerSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
- } else if (AllocationInst* AI = dyn_cast<AllocationInst>(QI)) {
- pointer = AI;
- if (ConstantInt* C = dyn_cast<ConstantInt>(AI->getArraySize()))
- pointerSize = C->getZExtValue() * \
- TD.getABITypeSize(AI->getAllocatedType());
- else
- pointerSize = ~0UL;
- } else if (VAArgInst* V = dyn_cast<VAArgInst>(QI)) {
- pointer = V->getOperand(0);
- pointerSize = TD.getTypeStoreSize(V->getType());
- } else if (FreeInst* F = dyn_cast<FreeInst>(QI)) {
- pointer = F->getPointerOperand();
+ Value *Pointer = 0;
+ uint64_t PointerSize = 0;
+ if (StoreInst *S = dyn_cast<StoreInst>(Inst)) {
+ Pointer = S->getPointerOperand();
+ PointerSize = TD->getTypeStoreSize(S->getOperand(0)->getType());
+ } else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
+ Pointer = V->getOperand(0);
+ PointerSize = TD->getTypeStoreSize(V->getType());
+ } else if (FreeInst *F = dyn_cast<FreeInst>(Inst)) {
+ Pointer = F->getPointerOperand();
// FreeInsts erase the entire structure
- pointerSize = ~0UL;
- } else if (isa<CallInst>(QI)) {
- AliasAnalysis::ModRefBehavior result =
- AA.getModRefBehavior(CallSite::get(QI));
- if (result != AliasAnalysis::DoesNotAccessMemory &&
- result != AliasAnalysis::OnlyReadsMemory) {
- if (!start && !block) {
- cachedResult.first = QI;
- cachedResult.second = true;
- reverseDep[QI].insert(C.getInstruction());
- }
- return QI;
- } else {
+ PointerSize = ~0UL;
+ } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
+ CallSite InstCS = CallSite::get(Inst);
+    // If the two calls do not interfere, keep scanning past this one.
+ if (AA->getModRefInfo(CS, InstCS) == AliasAnalysis::NoModRef)
continue;
- }
- } else
+
+ // FIXME: If this is a ref/ref result, we should ignore it!
+ // X = strlen(P);
+ // Y = strlen(Q);
+ // Z = strlen(P); // Z = X
+
+ // If they interfere, we generally return clobber. However, if they are
+ // calls to the same read-only functions we return Def.
+ if (!AA->onlyReadsMemory(CS) || CS.getCalledFunction() == 0 ||
+ CS.getCalledFunction() != InstCS.getCalledFunction())
+ return MemDepResult::getClobber(Inst);
+ return MemDepResult::getDef(Inst);
+ } else {
+ // Non-memory instruction.
continue;
-
- if (AA.getModRefInfo(C, pointer, pointerSize) != AliasAnalysis::NoModRef) {
- if (!start && !block) {
- cachedResult.first = QI;
- cachedResult.second = true;
- reverseDep[QI].insert(C.getInstruction());
- }
- return QI;
}
+
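+    // See if the query call site reads or writes the location this
+    // instruction accesses; if so, it is a clobbering dependency.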
+ if (AA->getModRefInfo(CS, Pointer, PointerSize) != AliasAnalysis::NoModRef)
+ return MemDepResult::getClobber(Inst);
}
- // No dependence found
- cachedResult.first = NonLocal;
- cachedResult.second = true;
- reverseDep[NonLocal].insert(C.getInstruction());
- return NonLocal;
+ // No dependence found.
+ return MemDepResult::getNonLocal();
}
-/// nonLocalHelper - Private helper used to calculate non-local dependencies
-/// by doing DFS on the predecessors of a block to find its dependencies
-void MemoryDependenceAnalysis::nonLocalHelper(Instruction* query,
- BasicBlock* block,
- DenseMap<BasicBlock*, Value*>& resp) {
- // Set of blocks that we've already visited in our DFS
- SmallPtrSet<BasicBlock*, 4> visited;
- // If we're updating a dirtied cache entry, we don't need to reprocess
- // already computed entries.
- for (DenseMap<BasicBlock*, Value*>::iterator I = resp.begin(),
- E = resp.end(); I != E; ++I)
- if (I->second != Dirty)
- visited.insert(I->first);
+/// getDependencyFrom - Return the instruction on which a memory operation
+/// depends.
+MemDepResult MemoryDependenceAnalysis::
+getDependencyFrom(Instruction *QueryInst, BasicBlock::iterator ScanIt,
+ BasicBlock *BB) {
+  // If we are already at the top of the block, there is nothing earlier to
+  // scan, so the dependency is non-local.
+ if (ScanIt == BB->begin())
+ return MemDepResult::getNonLocal();
- // Current stack of the DFS
- SmallVector<BasicBlock*, 4> stack;
- for (pred_iterator PI = pred_begin(block), PE = pred_end(block);
- PI != PE; ++PI)
- stack.push_back(*PI);
+ // Get the pointer value for which dependence will be determined
+ Value *MemPtr = 0;
+ uint64_t MemSize = 0;
- // Do a basic DFS
- while (!stack.empty()) {
- BasicBlock* BB = stack.back();
-
-    // If we've already visited this block, no need to revisit
- if (visited.count(BB)) {
- stack.pop_back();
- continue;
- }
+ if (StoreInst *SI = dyn_cast<StoreInst>(QueryInst)) {
+ // If this is a volatile store, don't mess around with it. Just return the
+ // previous instruction as a clobber.
+ if (SI->isVolatile())
+ return MemDepResult::getClobber(--ScanIt);
+
+ MemPtr = SI->getPointerOperand();
+ MemSize = TD->getTypeStoreSize(SI->getOperand(0)->getType());
+ } else if (LoadInst *LI = dyn_cast<LoadInst>(QueryInst)) {
+ // If this is a volatile load, don't mess around with it. Just return the
+ // previous instruction as a clobber.
+ if (LI->isVolatile())
+ return MemDepResult::getClobber(--ScanIt);
- // If we find a new block with a local dependency for query,
- // then we insert the new dependency and backtrack.
- if (BB != block) {
- visited.insert(BB);
+ MemPtr = LI->getPointerOperand();
+ MemSize = TD->getTypeStoreSize(LI->getType());
+ } else if (FreeInst *FI = dyn_cast<FreeInst>(QueryInst)) {
+ MemPtr = FI->getPointerOperand();
+ // FreeInsts erase the entire structure, not just a field.
+ MemSize = ~0UL;
+ } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
+ assert(0 && "Should use getCallSiteDependencyFrom!");
+ return getCallSiteDependencyFrom(CallSite::get(QueryInst), ScanIt, BB);
+ } else {
+ // Otherwise, this is a vaarg or non-memory instruction, just return a
+ // clobber dependency on the previous inst.
+ return MemDepResult::getClobber(--ScanIt);
+ }
+
+ // Walk backwards through the basic block, looking for dependencies
+ while (ScanIt != BB->begin()) {
+ Instruction *Inst = --ScanIt;
+
+    // Values depend on loads if the pointers must-alias: in particular, a
+    // load depends on an earlier must-aliased load of the same pointer.
+ if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+ Value *Pointer = LI->getPointerOperand();
+ uint64_t PointerSize = TD->getTypeStoreSize(LI->getType());
- Instruction* localDep = getDependency(query, 0, BB);
- if (localDep != NonLocal) {
- resp.insert(std::make_pair(BB, localDep));
- stack.pop_back();
-
+ // If we found a pointer, check if it could be the same as our pointer.
+ AliasAnalysis::AliasResult R =
+ AA->alias(Pointer, PointerSize, MemPtr, MemSize);
+ if (R == AliasAnalysis::NoAlias)
continue;
- }
- // If we re-encounter the starting block, we still need to search it
- // because there might be a dependency in the starting block AFTER
- // the position of the query. This is necessary to get loops right.
- } else if (BB == block) {
- visited.insert(BB);
- Instruction* localDep = getDependency(query, 0, BB);
- if (localDep != query)
- resp.insert(std::make_pair(BB, localDep));
+      // Two reads never conflict, so a prior may-aliased load is not a
+      // dependency for a load query.
+ if (isa<LoadInst>(QueryInst) && R == AliasAnalysis::MayAlias)
+ continue;
+ return MemDepResult::getDef(Inst);
+ }
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ Value *Pointer = SI->getPointerOperand();
+ uint64_t PointerSize = TD->getTypeStoreSize(SI->getOperand(0)->getType());
+
+ // If we found a pointer, check if it could be the same as our pointer.
+ AliasAnalysis::AliasResult R =
+ AA->alias(Pointer, PointerSize, MemPtr, MemSize);
- stack.pop_back();
+ if (R == AliasAnalysis::NoAlias)
+ continue;
+ if (R == AliasAnalysis::MayAlias)
+ return MemDepResult::getClobber(Inst);
+ return MemDepResult::getDef(Inst);
+ }
+
+ // If this is an allocation, and if we know that the accessed pointer is to
+ // the allocation, return Def. This means that there is no dependence and
+ // the access can be optimized based on that. For example, a load could
+ // turn into undef.
+ if (AllocationInst *AI = dyn_cast<AllocationInst>(Inst)) {
+ Value *AccessPtr = MemPtr->getUnderlyingObject();
+ if (AccessPtr == AI ||
+ AA->alias(AI, 1, AccessPtr, 1) == AliasAnalysis::MustAlias)
+ return MemDepResult::getDef(AI);
continue;
}
-    // If we didn't find anything, recurse on the predecessors of this block
- // Only do this for blocks with a small number of predecessors.
- bool predOnStack = false;
- bool inserted = false;
- if (std::distance(pred_begin(BB), pred_end(BB)) <= PredLimit) {
- for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
- PI != PE; ++PI)
- if (!visited.count(*PI)) {
- stack.push_back(*PI);
- inserted = true;
- } else
- predOnStack = true;
- }
-
- // If we inserted a new predecessor, then we'll come back to this block
- if (inserted)
+    // See if this instruction (e.g. a call or vaarg) mod/refs the pointer.
+ if (AA->getModRefInfo(Inst, MemPtr, MemSize) == AliasAnalysis::NoModRef)
continue;
- // If we didn't insert because we have no predecessors, then this
- // query has no dependency at all.
- else if (!inserted && !predOnStack) {
- resp.insert(std::make_pair(BB, None));
- // If we didn't insert because our predecessors are already on the stack,
- // then we might still have a dependency, but it will be discovered during
- // backtracking.
- } else if (!inserted && predOnStack){
- resp.insert(std::make_pair(BB, NonLocal));
- }
- stack.pop_back();
+ // Otherwise, there is a dependence.
+ return MemDepResult::getClobber(Inst);
}
-}
-
-/// getNonLocalDependency - Fills the passed-in map with the non-local
-/// dependencies of the queries. The map will contain NonLocal for
-/// blocks between the query and its dependencies.
-void MemoryDependenceAnalysis::getNonLocalDependency(Instruction* query,
- DenseMap<BasicBlock*, Value*>& resp) {
- if (depGraphNonLocal.count(query)) {
- DenseMap<BasicBlock*, Value*>& cached = depGraphNonLocal[query];
- NumCacheNonlocal++;
-
- SmallVector<BasicBlock*, 4> dirtied;
- for (DenseMap<BasicBlock*, Value*>::iterator I = cached.begin(),
- E = cached.end(); I != E; ++I)
- if (I->second == Dirty)
- dirtied.push_back(I->first);
-
- for (SmallVector<BasicBlock*, 4>::iterator I = dirtied.begin(),
- E = dirtied.end(); I != E; ++I) {
- Instruction* localDep = getDependency(query, 0, *I);
- if (localDep != NonLocal)
- cached[*I] = localDep;
- else {
- cached.erase(*I);
- nonLocalHelper(query, *I, cached);
- }
- }
-
- resp = cached;
-
- return;
- } else
- NumUncacheNonlocal++;
-
- // If not, go ahead and search for non-local deps.
- nonLocalHelper(query, query->getParent(), resp);
- // Update the non-local dependency cache
- for (DenseMap<BasicBlock*, Value*>::iterator I = resp.begin(), E = resp.end();
- I != E; ++I) {
- depGraphNonLocal[query].insert(*I);
- reverseDepNonLocal[I->second].insert(query);
- }
+ // If we found nothing, return the non-local flag.
+ return MemDepResult::getNonLocal();
}
/// getDependency - Return the instruction on which a memory operation
-/// depends. The local parameter indicates if the query should only
-/// evaluate dependencies within the same basic block.
-Instruction* MemoryDependenceAnalysis::getDependency(Instruction* query,
- Instruction* start,
- BasicBlock* block) {
- // Start looking for dependencies with the queried inst
- BasicBlock::iterator QI = query;
+/// depends.
+MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
+ Instruction *ScanPos = QueryInst;
// Check for a cached result
- std::pair<Instruction*, bool>& cachedResult = depGraphLocal[query];
- // If we have a _confirmed_ cached entry, return it
- if (!block && !start) {
- if (cachedResult.second)
- return cachedResult.first;
- else if (cachedResult.first && cachedResult.first != NonLocal)
- // If we have an unconfirmed cached entry, we can start our search from there
- QI = cachedResult.first;
+ MemDepResult &LocalCache = LocalDeps[QueryInst];
+
+ // If the cached entry is non-dirty, just return it. Note that this depends
+ // on MemDepResult's default constructing to 'dirty'.
+ if (!LocalCache.isDirty())
+ return LocalCache;
+
+ // Otherwise, if we have a dirty entry, we know we can start the scan at that
+ // instruction, which may save us some work.
+ if (Instruction *Inst = LocalCache.getInst()) {
+ ScanPos = Inst;
+
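+    // Drop the stale reverse mapping from Inst back to QueryInst; it is
+    // re-established below once the new dependency is known.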
+ SmallPtrSet<Instruction*, 4> &InstMap = ReverseLocalDeps[Inst];
+ InstMap.erase(QueryInst);
+ if (InstMap.empty())
+ ReverseLocalDeps.erase(Inst);
}
- if (start)
- QI = start;
- else if (!start && block)
- QI = block->end();
+ // Do the scan.
+ if (!isa<CallInst>(QueryInst) && !isa<InvokeInst>(QueryInst))
+ LocalCache = getDependencyFrom(QueryInst, ScanPos, QueryInst->getParent());
+ else
+ LocalCache = getCallSiteDependencyFrom(CallSite::get(QueryInst), ScanPos,
+ QueryInst->getParent());
- AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
- TargetData& TD = getAnalysis<TargetData>();
+ // Remember the result!
+ if (Instruction *I = LocalCache.getInst())
+ ReverseLocalDeps[I].insert(QueryInst);
- // Get the pointer value for which dependence will be determined
- Value* dependee = 0;
- uint64_t dependeeSize = 0;
- bool queryIsVolatile = false;
- if (StoreInst* S = dyn_cast<StoreInst>(query)) {
- dependee = S->getPointerOperand();
- dependeeSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
- queryIsVolatile = S->isVolatile();
- } else if (LoadInst* L = dyn_cast<LoadInst>(query)) {
- dependee = L->getPointerOperand();
- dependeeSize = TD.getTypeStoreSize(L->getType());
- queryIsVolatile = L->isVolatile();
- } else if (VAArgInst* V = dyn_cast<VAArgInst>(query)) {
- dependee = V->getOperand(0);
- dependeeSize = TD.getTypeStoreSize(V->getType());
- } else if (FreeInst* F = dyn_cast<FreeInst>(query)) {
- dependee = F->getPointerOperand();
+ return LocalCache;
+}
+
+/// getNonLocalDependency - Perform a full dependency query for the
+/// specified instruction, returning the set of blocks that the value is
+/// potentially live across. The returned set of results includes a
+/// "NonLocal" entry for each block that is transparent to the value.
+///
+/// This method assumes the instruction returns a "nonlocal" dependency
+/// within its own block.
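+///
+/// For example, if a load's local query is NonLocal, the returned info maps
+/// each visited block either to the instruction the load depends on there,
+/// or to a NonLocal marker if the block is transparent to the pointer.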
+///
+const MemoryDependenceAnalysis::NonLocalDepInfo &
+MemoryDependenceAnalysis::getNonLocalDependency(Instruction *QueryInst) {
+ assert(getDependency(QueryInst).isNonLocal() &&
+ "getNonLocalDependency should only be used on insts with non-local deps!");
+ PerInstNLInfo &CacheP = NonLocalDeps[QueryInst];
+
+ NonLocalDepInfo &Cache = CacheP.first;
+
+ /// DirtyBlocks - This is the set of blocks that need to be recomputed. In
+ /// the cached case, this can happen due to instructions being deleted etc. In
+ /// the uncached case, this starts out as the set of predecessors we care
+ /// about.
+ SmallVector<BasicBlock*, 32> DirtyBlocks;
+
+ if (!Cache.empty()) {
+ // Okay, we have a cache entry. If we know it is not dirty, just return it
+ // with no computation.
+ if (!CacheP.second) {
+      ++NumCacheNonLocal;
+ return Cache;
+ }
- // FreeInsts erase the entire structure, not just a field
- dependeeSize = ~0UL;
- } else if (CallSite::get(query).getInstruction() != 0)
- return getCallSiteDependency(CallSite::get(query), start, block);
- else if (isa<AllocationInst>(query))
- return None;
- else
- return None;
+ // If we already have a partially computed set of results, scan them to
+ // determine what is dirty, seeding our initial DirtyBlocks worklist.
+ for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
+ I != E; ++I)
+ if (I->second.isDirty())
+ DirtyBlocks.push_back(I->first);
+
+ // Sort the cache so that we can do fast binary search lookups below.
+ std::sort(Cache.begin(), Cache.end());
+
+ ++NumCacheDirtyNonLocal;
+ //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
+ // << Cache.size() << " cached: " << *QueryInst;
+ } else {
+ // Seed DirtyBlocks with each of the preds of QueryInst's block.
+ BasicBlock *QueryBB = QueryInst->getParent();
+ DirtyBlocks.append(pred_begin(QueryBB), pred_end(QueryBB));
+    ++NumUncacheNonLocal;
+ }
- BasicBlock::iterator blockBegin = block ? block->begin()
- : query->getParent()->begin();
+  // Blocks are checked against the Visited set first; the cache vector is
+  // kept in sorted order for the binary search below.
+ SmallPtrSet<BasicBlock*, 64> Visited;
- // Walk backwards through the basic block, looking for dependencies
- while (QI != blockBegin) {
- --QI;
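+  // Remember how many cache entries are sorted; entries appended during this
+  // query land past NumSortedEntries and are not covered by the binary search.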
+ unsigned NumSortedEntries = Cache.size();
+
+ // Iterate while we still have blocks to update.
+ while (!DirtyBlocks.empty()) {
+ BasicBlock *DirtyBB = DirtyBlocks.back();
+ DirtyBlocks.pop_back();
- // If this inst is a memory op, get the pointer it accessed
- Value* pointer = 0;
- uint64_t pointerSize = 0;
- if (StoreInst* S = dyn_cast<StoreInst>(QI)) {
- // All volatile loads/stores depend on each other
- if (queryIsVolatile && S->isVolatile()) {
- if (!start && !block) {
- cachedResult.first = S;
- cachedResult.second = true;
- reverseDep[S].insert(query);
- }
-
- return S;
- }
-
- pointer = S->getPointerOperand();
- pointerSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
- } else if (LoadInst* L = dyn_cast<LoadInst>(QI)) {
- // All volatile loads/stores depend on each other
- if (queryIsVolatile && L->isVolatile()) {
- if (!start && !block) {
- cachedResult.first = L;
- cachedResult.second = true;
- reverseDep[L].insert(query);
- }
-
- return L;
- }
-
- pointer = L->getPointerOperand();
- pointerSize = TD.getTypeStoreSize(L->getType());
- } else if (AllocationInst* AI = dyn_cast<AllocationInst>(QI)) {
- pointer = AI;
- if (ConstantInt* C = dyn_cast<ConstantInt>(AI->getArraySize()))
- pointerSize = C->getZExtValue() * \
- TD.getABITypeSize(AI->getAllocatedType());
- else
- pointerSize = ~0UL;
- } else if (VAArgInst* V = dyn_cast<VAArgInst>(QI)) {
- pointer = V->getOperand(0);
- pointerSize = TD.getTypeStoreSize(V->getType());
- } else if (FreeInst* F = dyn_cast<FreeInst>(QI)) {
- pointer = F->getPointerOperand();
-
- // FreeInsts erase the entire structure
- pointerSize = ~0UL;
- } else if (CallSite::get(QI).getInstruction() != 0) {
- // Call insts need special handling. Check if they can modify our pointer
- AliasAnalysis::ModRefResult MR = AA.getModRefInfo(CallSite::get(QI),
- dependee, dependeeSize);
-
- if (MR != AliasAnalysis::NoModRef) {
- // Loads don't depend on read-only calls
- if (isa<LoadInst>(query) && MR == AliasAnalysis::Ref)
- continue;
-
- if (!start && !block) {
- cachedResult.first = QI;
- cachedResult.second = true;
- reverseDep[QI].insert(query);
- }
-
- return QI;
- } else {
+ // Already processed this block?
+ if (!Visited.insert(DirtyBB))
+ continue;
+
+ // Do a binary search to see if we already have an entry for this block in
+ // the cache set. If so, find it.
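+    // upper_bound finds the first sorted entry after DirtyBB, so we may have
+    // to back up one slot to land on an exact match.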
+ NonLocalDepInfo::iterator Entry =
+ std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
+ std::make_pair(DirtyBB, MemDepResult()));
+    if (Entry != Cache.begin() && prior(Entry)->first == DirtyBB)
+ --Entry;
+
+ MemDepResult *ExistingResult = 0;
+ if (Entry != Cache.begin()+NumSortedEntries &&
+ Entry->first == DirtyBB) {
+ // If we already have an entry, and if it isn't already dirty, the block
+ // is done.
+ if (!Entry->second.isDirty())
continue;
- }
+
+ // Otherwise, remember this slot so we can update the value.
+ ExistingResult = &Entry->second;
}
- // If we found a pointer, check if it could be the same as our pointer
- if (pointer) {
- AliasAnalysis::AliasResult R = AA.alias(pointer, pointerSize,
- dependee, dependeeSize);
+    // If the dirty entry has an instruction, start scanning from it so we
+    // don't have to rescan the entire block.
+ BasicBlock::iterator ScanPos = DirtyBB->end();
+ if (ExistingResult) {
+ if (Instruction *Inst = ExistingResult->getInst()) {
+ ScanPos = Inst;
- if (R != AliasAnalysis::NoAlias) {
- // May-alias loads don't depend on each other
- if (isa<LoadInst>(query) && isa<LoadInst>(QI) &&
- R == AliasAnalysis::MayAlias)
- continue;
-
- if (!start && !block) {
- cachedResult.first = QI;
- cachedResult.second = true;
- reverseDep[QI].insert(query);
- }
-
- return QI;
+ // We're removing QueryInst's use of Inst.
+ SmallPtrSet<Instruction*, 4> &InstMap = ReverseNonLocalDeps[Inst];
+ InstMap.erase(QueryInst);
+ if (InstMap.empty()) ReverseNonLocalDeps.erase(Inst);
}
}
+
+ // Find out if this block has a local dependency for QueryInst.
+ MemDepResult Dep;
+ if (!isa<CallInst>(QueryInst) && !isa<InvokeInst>(QueryInst))
+ Dep = getDependencyFrom(QueryInst, ScanPos, DirtyBB);
+ else
+ Dep = getCallSiteDependencyFrom(CallSite::get(QueryInst), ScanPos,
+ DirtyBB);
+
+ // If we had a dirty entry for the block, update it. Otherwise, just add
+ // a new entry.
+ if (ExistingResult)
+ *ExistingResult = Dep;
+ else
+ Cache.push_back(std::make_pair(DirtyBB, Dep));
+
+ // If the block has a dependency (i.e. it isn't completely transparent to
+ // the value), remember the association!
+ if (!Dep.isNonLocal()) {
+ // Keep the ReverseNonLocalDeps map up to date so we can efficiently
+ // update this when we remove instructions.
+ if (Instruction *Inst = Dep.getInst())
+ ReverseNonLocalDeps[Inst].insert(QueryInst);
+    } else {
+      // If the block *is* completely transparent to the value, we need to
+      // check the predecessors of this block. Add them to our worklist.
+ DirtyBlocks.append(pred_begin(DirtyBB), pred_end(DirtyBB));
+ }
}
- // If we found nothing, return the non-local flag
- if (!start && !block) {
- cachedResult.first = NonLocal;
- cachedResult.second = true;
- reverseDep[NonLocal].insert(query);
- }
-
- return NonLocal;
+ return Cache;
}
-/// dropInstruction - Remove an instruction from the analysis, making
-/// absolutely conservative assumptions when updating the cache. This is
-/// useful, for example when an instruction is changed rather than removed.
-void MemoryDependenceAnalysis::dropInstruction(Instruction* drop) {
- depMapType::iterator depGraphEntry = depGraphLocal.find(drop);
- if (depGraphEntry != depGraphLocal.end())
- reverseDep[depGraphEntry->second.first].erase(drop);
-
- // Drop dependency information for things that depended on this instr
- SmallPtrSet<Instruction*, 4>& set = reverseDep[drop];
- for (SmallPtrSet<Instruction*, 4>::iterator I = set.begin(), E = set.end();
- I != E; ++I)
- depGraphLocal.erase(*I);
-
- depGraphLocal.erase(drop);
- reverseDep.erase(drop);
-
- for (DenseMap<BasicBlock*, Value*>::iterator DI =
- depGraphNonLocal[drop].begin(), DE = depGraphNonLocal[drop].end();
- DI != DE; ++DI)
- if (DI->second != None)
- reverseDepNonLocal[DI->second].erase(drop);
-
- if (reverseDepNonLocal.count(drop)) {
- SmallPtrSet<Instruction*, 4>& set = reverseDepNonLocal[drop];
- for (SmallPtrSet<Instruction*, 4>::iterator I = set.begin(), E = set.end();
- I != E; ++I)
- for (DenseMap<BasicBlock*, Value*>::iterator DI =
- depGraphNonLocal[*I].begin(), DE = depGraphNonLocal[*I].end();
- DI != DE; ++DI)
- if (DI->second == drop)
- DI->second = Dirty;
- }
-
- reverseDepNonLocal.erase(drop);
- nonLocalDepMapType::iterator I = depGraphNonLocal.find(drop);
- if (I != depGraphNonLocal.end())
- depGraphNonLocal.erase(I);
-}
/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
/// This method attempts to keep the cache coherent using the reverse map.
-void MemoryDependenceAnalysis::removeInstruction(Instruction* rem) {
- // Figure out the new dep for things that currently depend on rem
- Instruction* newDep = NonLocal;
-
- for (DenseMap<BasicBlock*, Value*>::iterator DI =
- depGraphNonLocal[rem].begin(), DE = depGraphNonLocal[rem].end();
- DI != DE; ++DI)
- if (DI->second != None)
- reverseDepNonLocal[DI->second].erase(rem);
+void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
+ // Walk through the Non-local dependencies, removing this one as the value
+ // for any cached queries.
+ NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
+ if (NLDI != NonLocalDeps.end()) {
+ NonLocalDepInfo &BlockMap = NLDI->second.first;
+ for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
+ DI != DE; ++DI)
+ if (Instruction *Inst = DI->second.getInst())
+ ReverseNonLocalDeps[Inst].erase(RemInst);
+ NonLocalDeps.erase(NLDI);
+ }
- depMapType::iterator depGraphEntry = depGraphLocal.find(rem);
+ // If we have a cached local dependence query for this instruction, remove it.
+ //
+ LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
+ if (LocalDepEntry != LocalDeps.end()) {
+    // Remove us from Inst's reverse set now that the local dep info is gone.
+ if (Instruction *Inst = LocalDepEntry->second.getInst()) {
+ SmallPtrSet<Instruction*, 4> &RLD = ReverseLocalDeps[Inst];
+ RLD.erase(RemInst);
+ if (RLD.empty())
+ ReverseLocalDeps.erase(Inst);
+ }
- if (depGraphEntry != depGraphLocal.end()) {
- reverseDep[depGraphEntry->second.first].erase(rem);
+ // Remove this local dependency info.
+ LocalDeps.erase(LocalDepEntry);
+ }
+
+ // Loop over all of the things that depend on the instruction we're removing.
+ //
+ SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;
+
+ ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
+ if (ReverseDepIt != ReverseLocalDeps.end()) {
+ SmallPtrSet<Instruction*, 4> &ReverseDeps = ReverseDepIt->second;
+ // RemInst can't be the terminator if it has stuff depending on it.
+ assert(!ReverseDeps.empty() && !isa<TerminatorInst>(RemInst) &&
+ "Nothing can locally depend on a terminator");
- if (depGraphEntry->second.first != NonLocal &&
- depGraphEntry->second.first != None &&
- depGraphEntry->second.second) {
- // If we have dep info for rem, set them to it
- BasicBlock::iterator RI = depGraphEntry->second.first;
- RI++;
- newDep = RI;
- } else if ( (depGraphEntry->second.first == NonLocal ||
- depGraphEntry->second.first == None ) &&
- depGraphEntry->second.second ) {
- // If we have a confirmed non-local flag, use it
- newDep = depGraphEntry->second.first;
- } else {
- // Otherwise, use the immediate successor of rem
- // NOTE: This is because, when getDependence is called, it will first
- // check the immediate predecessor of what is in the cache.
- BasicBlock::iterator RI = rem;
- RI++;
- newDep = RI;
+    // Anything that was locally dependent on RemInst is now dependent on the
+    // instruction after RemInst. Such entries get the dirty flag set, so they
+    // will be rescanned lazily; this saves having to scan the entire block to
+    // get to this point.
+ Instruction *NewDepInst = next(BasicBlock::iterator(RemInst));
+
+ for (SmallPtrSet<Instruction*, 4>::iterator I = ReverseDeps.begin(),
+ E = ReverseDeps.end(); I != E; ++I) {
+ Instruction *InstDependingOnRemInst = *I;
+ assert(InstDependingOnRemInst != RemInst &&
+ "Already removed our local dep info");
+
+ LocalDeps[InstDependingOnRemInst] = MemDepResult::getDirty(NewDepInst);
+
+ // Make sure to remember that new things depend on NewDepInst.
+ ReverseDepsToAdd.push_back(std::make_pair(NewDepInst,
+ InstDependingOnRemInst));
+ }
+
+ ReverseLocalDeps.erase(ReverseDepIt);
+
+ // Add new reverse deps after scanning the set, to avoid invalidating the
+ // 'ReverseDeps' reference.
+ while (!ReverseDepsToAdd.empty()) {
+ ReverseLocalDeps[ReverseDepsToAdd.back().first]
+ .insert(ReverseDepsToAdd.back().second);
+ ReverseDepsToAdd.pop_back();
}
- } else {
- // Otherwise, use the immediate successor of rem
- // NOTE: This is because, when getDependence is called, it will first
- // check the immediate predecessor of what is in the cache.
- BasicBlock::iterator RI = rem;
- RI++;
- newDep = RI;
}
- SmallPtrSet<Instruction*, 4>& set = reverseDep[rem];
- for (SmallPtrSet<Instruction*, 4>::iterator I = set.begin(), E = set.end();
- I != E; ++I) {
- // Insert the new dependencies
- // Mark it as unconfirmed as long as it is not the non-local flag
- depGraphLocal[*I] = std::make_pair(newDep, (newDep == NonLocal ||
- newDep == None));
+ ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
+ if (ReverseDepIt != ReverseNonLocalDeps.end()) {
+    SmallPtrSet<Instruction*, 4> &Set = ReverseDepIt->second;
+    for (SmallPtrSet<Instruction*, 4>::iterator I = Set.begin(), E = Set.end();
+         I != E; ++I) {
+ assert(*I != RemInst && "Already removed NonLocalDep info for RemInst");
+
+ PerInstNLInfo &INLD = NonLocalDeps[*I];
+ // The information is now dirty!
+ INLD.second = true;
+
+ for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
+ DE = INLD.first.end(); DI != DE; ++DI) {
+ if (DI->second.getInst() != RemInst) continue;
+
+ // Convert to a dirty entry for the subsequent instruction.
+ Instruction *NextI = 0;
+ if (!RemInst->isTerminator()) {
+ NextI = next(BasicBlock::iterator(RemInst));
+ ReverseDepsToAdd.push_back(std::make_pair(NextI, *I));
+ }
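+        // NextI is null when RemInst is a terminator; a null dirty entry
+        // forces a full rescan of the block on the next query.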
+ DI->second = MemDepResult::getDirty(NextI);
+ }
+ }
+
+ ReverseNonLocalDeps.erase(ReverseDepIt);
+
+    // Add new reverse deps after scanning the set, to avoid invalidating 'Set'.
+ while (!ReverseDepsToAdd.empty()) {
+ ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
+ .insert(ReverseDepsToAdd.back().second);
+ ReverseDepsToAdd.pop_back();
+ }
}
- depGraphLocal.erase(rem);
- reverseDep.erase(rem);
+ assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
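+  // Notify the alias analysis that RemInst is going away so it can update any
+  // state it keeps for this value.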
+ AA->deleteValue(RemInst);
+ DEBUG(verifyRemoved(RemInst));
+}
+
+/// verifyRemoved - Verify that the specified instruction does not occur
+/// in our internal data structures.
+void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
+ for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
+ E = LocalDeps.end(); I != E; ++I) {
+ assert(I->first != D && "Inst occurs in data structures");
+ assert(I->second.getInst() != D &&
+ "Inst occurs in data structures");
+ }
- if (reverseDepNonLocal.count(rem)) {
- SmallPtrSet<Instruction*, 4>& set = reverseDepNonLocal[rem];
- for (SmallPtrSet<Instruction*, 4>::iterator I = set.begin(), E = set.end();
- I != E; ++I)
- for (DenseMap<BasicBlock*, Value*>::iterator DI =
- depGraphNonLocal[*I].begin(), DE = depGraphNonLocal[*I].end();
- DI != DE; ++DI)
- if (DI->second == rem)
- DI->second = Dirty;
-
+ for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
+ E = NonLocalDeps.end(); I != E; ++I) {
+ assert(I->first != D && "Inst occurs in data structures");
+ const PerInstNLInfo &INLD = I->second;
+ for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
+ EE = INLD.first.end(); II != EE; ++II)
+ assert(II->second.getInst() != D && "Inst occurs in data structures");
}
- reverseDepNonLocal.erase(rem);
- nonLocalDepMapType::iterator I = depGraphNonLocal.find(rem);
- if (I != depGraphNonLocal.end())
- depGraphNonLocal.erase(I);
-
- getAnalysis<AliasAnalysis>().deleteValue(rem);
+ for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
+ E = ReverseLocalDeps.end(); I != E; ++I) {
+ assert(I->first != D && "Inst occurs in data structures");
+ for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
+ EE = I->second.end(); II != EE; ++II)
+ assert(*II != D && "Inst occurs in data structures");
+ }
+
+ for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
+ E = ReverseNonLocalDeps.end();
+ I != E; ++I) {
+ assert(I->first != D && "Inst occurs in data structures");
+ for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
+ EE = I->second.end(); II != EE; ++II)
+ assert(*II != D && "Inst occurs in data structures");
+ }
}