//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/NoFolder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
return new AtomicExpand(TM);
}
+
+namespace {
+
+bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal);
+Value* GetUntaintedAddress(Value* CurrentAddress);
+
+// The maximum depth to which we trace a value's operands when computing its
+// dependence set.
+const unsigned kDependenceDepth = 4;
+
+// Recursively looks for variables that 'Val' depends on, up to the given
+// depth 'Depth', and adds them to 'DepSet'. If 'InsertOnlyLeafNodes' is true,
+// only the leaf-node values are inserted; otherwise, all visited nodes are
+// included in 'DepSet'. Note that constants are ignored.
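+// E.g., for '%add = add i32 %ld, %y' where '%ld = load i32, i32* %p', the
+// leaf dependence set of '%add' is {%ld, %y}: loads terminate the traversal
+// and constants are skipped.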
+template <typename SetType>
+void recursivelyFindDependence(SetType* DepSet, Value* Val,
+ bool InsertOnlyLeafNodes = false,
+ unsigned Depth = kDependenceDepth) {
+ if (Val == nullptr) {
+ return;
+ }
+ if (!InsertOnlyLeafNodes && !isa<Constant>(Val)) {
+ DepSet->insert(Val);
+ }
+ if (Depth == 0) {
+ // Cannot go deeper. Insert the leaf nodes.
+ if (InsertOnlyLeafNodes && !isa<Constant>(Val)) {
+ DepSet->insert(Val);
+ }
+ return;
+ }
+
+ // Go one step further to explore the dependence of the operands.
+ Instruction* I = nullptr;
+ if ((I = dyn_cast<Instruction>(Val))) {
+    if (isa<LoadInst>(I)) {
+      // A load is considered a leaf of the dependence tree. Done.
+      DepSet->insert(Val);
+      return;
+    } else if (I->isBinaryOp()) {
+      BinaryOperator* BO = cast<BinaryOperator>(I);
+      Value *Op0 = BO->getOperand(0), *Op1 = BO->getOperand(1);
+      recursivelyFindDependence(DepSet, Op0, InsertOnlyLeafNodes, Depth - 1);
+      recursivelyFindDependence(DepSet, Op1, InsertOnlyLeafNodes, Depth - 1);
+    } else if (I->isCast()) {
+      Value* Op0 = I->getOperand(0);
+      recursivelyFindDependence(DepSet, Op0, InsertOnlyLeafNodes, Depth - 1);
+    } else if (I->getOpcode() == Instruction::Select) {
+      Value* Op0 = I->getOperand(0);
+      Value* Op1 = I->getOperand(1);
+      Value* Op2 = I->getOperand(2);
+      recursivelyFindDependence(DepSet, Op0, InsertOnlyLeafNodes, Depth - 1);
+      recursivelyFindDependence(DepSet, Op1, InsertOnlyLeafNodes, Depth - 1);
+      recursivelyFindDependence(DepSet, Op2, InsertOnlyLeafNodes, Depth - 1);
+    } else if (I->getOpcode() == Instruction::GetElementPtr) {
+      for (unsigned i = 0; i < I->getNumOperands(); i++) {
+        recursivelyFindDependence(DepSet, I->getOperand(i),
+                                  InsertOnlyLeafNodes, Depth - 1);
+      }
+    } else if (I->getOpcode() == Instruction::Store) {
+      auto* SI = cast<StoreInst>(I);
+      recursivelyFindDependence(DepSet, SI->getPointerOperand(),
+                                InsertOnlyLeafNodes, Depth - 1);
+      recursivelyFindDependence(DepSet, SI->getValueOperand(),
+                                InsertOnlyLeafNodes, Depth - 1);
+ } else {
+ Value* Op0 = nullptr;
+ Value* Op1 = nullptr;
+ switch (I->getOpcode()) {
+ case Instruction::ICmp:
+ case Instruction::FCmp: {
+ Op0 = I->getOperand(0);
+ Op1 = I->getOperand(1);
+        recursivelyFindDependence(DepSet, Op0, InsertOnlyLeafNodes,
+                                  Depth - 1);
+        recursivelyFindDependence(DepSet, Op1, InsertOnlyLeafNodes,
+                                  Depth - 1);
+ break;
+ }
+ default: {
+ // Be conservative. Add it and be done with it.
+ DepSet->insert(Val);
+ return;
+ }
+ }
+ }
+ } else if (isa<Constant>(Val)) {
+ // Not interested in constant values. Done.
+ return;
+ } else {
+ // Be conservative. Add it and be done with it.
+ DepSet->insert(Val);
+ return;
+ }
+}
+
+// Helper function to create a Cast instruction.
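+// Integer values are sign-extended to 'TargetIntegerType', floating-point
+// values are converted via fptosi, and pointers via ptrtoint; anything else
+// falls back to a plain bitcast.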
+Value* createCast(IRBuilder<true, NoFolder>& Builder, Value* DepVal,
+ Type* TargetIntegerType) {
+ Instruction::CastOps CastOp = Instruction::BitCast;
+ switch (DepVal->getType()->getTypeID()) {
+ case Type::IntegerTyID: {
+ CastOp = Instruction::SExt;
+ break;
+ }
+ case Type::FloatTyID:
+ case Type::DoubleTyID: {
+ CastOp = Instruction::FPToSI;
+ break;
+ }
+ case Type::PointerTyID: {
+ CastOp = Instruction::PtrToInt;
+ break;
+ }
+ default: { break; }
+ }
+
+ return Builder.CreateCast(CastOp, DepVal, TargetIntegerType);
+}
+
+// Given a value, if it's a tainted address, this function returns the
+// instruction that ORs the "dependence value" with the "original address".
+// Otherwise, returns nullptr. This instruction is the first OR instruction
+// where one of its operand is an AND instruction with an operand being 0.
+//
+// E.g., it returns '%4 = or i32 %3, %2' given 'CurrentAddress' is '%5'.
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+Instruction* getOrAddress(Value* CurrentAddress) {
+  // Is 'CurrentAddress' a cast from integer to pointer type?
+  Instruction* OrAddress = nullptr;
+  Instruction* AndDep = nullptr;
+  Constant* ZeroConst = nullptr;
+
+ const Instruction* CastToPtr = dyn_cast<Instruction>(CurrentAddress);
+ if (CastToPtr && CastToPtr->getOpcode() == Instruction::IntToPtr) {
+ // Is it an OR instruction: %1 = or %and, %actualAddress.
+ if ((OrAddress = dyn_cast<Instruction>(CastToPtr->getOperand(0))) &&
+ OrAddress->getOpcode() == Instruction::Or) {
+      // The first operand should be an AND instruction.
+      AndDep = dyn_cast<Instruction>(OrAddress->getOperand(0));
+      if (AndDep && AndDep->getOpcode() == Instruction::And) {
+        // Also make sure the second operand of the "AND" is zero, which masks
+        // the dependence value out of the final address computation.
+        if ((ZeroConst = dyn_cast<Constant>(AndDep->getOperand(1))) &&
+            ZeroConst->isNullValue()) {
+ return OrAddress;
+ }
+ }
+ }
+ }
+ // Looks like it's not been tainted.
+ return nullptr;
+}
+
+// Given a value, if it's a tainted address, this function returns the
+// instruction that taints the "dependence value". Otherwise, returns nullptr.
+// This instruction is the last AND instruction where one of its operand is 0.
+// E.g., it returns '%3' given 'CurrentAddress' is '%5'.
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+Instruction* getAndDependence(Value* CurrentAddress) {
+ // If 'CurrentAddress' is tainted, get the OR instruction.
+ auto* OrAddress = getOrAddress(CurrentAddress);
+ if (OrAddress == nullptr) {
+ return nullptr;
+ }
+
+  // getOrAddress() has already verified that the first operand of the OR is
+  // an AND instruction, so no further checking is needed.
+  auto* AndDepInst = dyn_cast<Instruction>(OrAddress->getOperand(0));
+  assert(AndDepInst);
+ return AndDepInst;
+}
+
+// Given a value, if it's a tainted address, this function returns
+// the "dependence value", which is the first operand in the AND instruction.
+// E.g., it returns '%1' given 'CurrentAddress' is '%5'.
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+Value* getDependence(Value* CurrentAddress) {
+ auto* AndInst = getAndDependence(CurrentAddress);
+ if (AndInst == nullptr) {
+ return nullptr;
+ }
+ return AndInst->getOperand(0);
+}
+
+// Given an address that has been tainted, returns the only condition it depends
+// on, if any; otherwise, returns nullptr.
+Value* getConditionDependence(Value* Address) {
+ auto* Dep = getDependence(Address);
+ if (Dep == nullptr) {
+ // 'Address' has not been dependence-tainted.
+ return nullptr;
+ }
+
+ Value* Operand = Dep;
+ while (true) {
+ auto* Inst = dyn_cast<Instruction>(Operand);
+ if (Inst == nullptr) {
+ // Non-instruction type does not have condition dependence.
+ return nullptr;
+ }
+ if (Inst->getOpcode() == Instruction::ICmp) {
+ return Inst;
+ } else {
+ if (Inst->getNumOperands() != 1) {
+ return nullptr;
+ } else {
+ Operand = Inst->getOperand(0);
+ }
+ }
+ }
+}
+
+// Conservatively decides whether the dependence set of 'Val1' includes the
+// dependence set of 'Val2'. 'Val1ExpandLevel' and 'Val2ExpandLevel' bound how
+// deeply each value is expanded; 'Val2' is expanded to its leaf nodes only,
+// while all nodes visited from 'Val1' are collected. If it returns true, the
+// dependence set of 'Val1' includes that of 'Val2'; otherwise, it only means
+// we cannot conclusively decide it.
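+// E.g., for '%a = add i32 %b, 1' and '%c = icmp ne i32 %a, 0',
+// dependenceSetInclusion(%c, %b) is true since '%b' is reachable from '%c'
+// through '%a'.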
+bool dependenceSetInclusion(Value* Val1, Value* Val2,
+ int Val1ExpandLevel = 2 * kDependenceDepth,
+ int Val2ExpandLevel = kDependenceDepth) {
+ typedef SmallSet<Value*, 8> IncludingSet;
+ typedef SmallSet<Value*, 4> IncludedSet;
+
+ IncludingSet DepSet1;
+ IncludedSet DepSet2;
+  // Expand the including set to a greater depth.
+ recursivelyFindDependence(&DepSet1, Val1, false /*Insert all visited nodes*/,
+ Val1ExpandLevel);
+ recursivelyFindDependence(&DepSet2, Val2, true /*Only insert leaf nodes*/,
+ Val2ExpandLevel);
+
+  auto set_inclusion = [](const IncludingSet& FullSet,
+                          const IncludedSet& Subset) {
+ for (auto* Dep : Subset) {
+ if (0 == FullSet.count(Dep)) {
+ return false;
+ }
+ }
+ return true;
+ };
+ bool inclusion = set_inclusion(DepSet1, DepSet2);
+ DEBUG(dbgs() << "[dependenceSetInclusion]: " << inclusion << "\n");
+ DEBUG(dbgs() << "Including set for: " << *Val1 << "\n");
+ DEBUG(for (const auto* Dep : DepSet1) { dbgs() << "\t\t" << *Dep << "\n"; });
+ DEBUG(dbgs() << "Included set for: " << *Val2 << "\n");
+ DEBUG(for (const auto* Dep : DepSet2) { dbgs() << "\t\t" << *Dep << "\n"; });
+
+ return inclusion;
+}
+
+// Recursively iterates through the operands reachable from 'DepVal'. If there
+// exists a single leaf value that 'DepVal' ultimately depends on, we call that
+// value the root dependence of 'DepVal' and return it. Otherwise, return
+// 'DepVal'.
+Value* getRootDependence(Value* DepVal) {
+ SmallSet<Value*, 8> DepSet;
+ for (unsigned depth = kDependenceDepth; depth > 0; --depth) {
+ recursivelyFindDependence(&DepSet, DepVal, true /*Only insert leaf nodes*/,
+ depth);
+ if (DepSet.size() == 1) {
+ return *DepSet.begin();
+ }
+ DepSet.clear();
+ }
+ return DepVal;
+}
+
+// This function taints the address of 'SI' with 'DepVal'. If the address of
+// 'SI' already depends on whatever 'DepVal' depends on, this function does
+// nothing and returns false. Otherwise, it returns true.
+//
+// This effect forces the store and any stores that come later to depend on
+// 'DepVal'. For example, we have a condition "cond", and a store instruction
+// "s: STORE addr, val". If we want "s" (and any later store) to depend on
+// "cond", we do the following:
+// %conv = sext i1 %cond to i32
+// %addrVal = ptrtoint i32* %addr to i32
+// %andCond = and i32 %conv, 0
+// %orAddr = or i32 %andCond, %addrVal
+// %NewAddr = inttoptr i32 %orAddr to i32*
+//
+// This is a more concrete example:
+// ------
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+bool taintStoreAddress(StoreInst* SI, Value* DepVal,
+ const char* calling_func = __builtin_FUNCTION()) {
+ DEBUG(dbgs() << "Called from " << calling_func << '\n');
+ IRBuilder<true, NoFolder> Builder(SI);
+ BasicBlock* BB = SI->getParent();
+ Value* Address = SI->getPointerOperand();
+ Type* TargetIntegerType =
+ IntegerType::get(Address->getContext(),
+ BB->getModule()->getDataLayout().getPointerSizeInBits());
+
+  // Does SI's address already depend on whatever 'DepVal' depends on?
+ if (StoreAddressDependOnValue(SI, DepVal)) {
+ return false;
+ }
+
+ // Figure out if there's a root variable 'DepVal' depends on. For example, we
+ // can extract "getelementptr inbounds %struct, %struct* %0, i64 0, i32 123"
+ // to be "%struct* %0" since all other operands are constant.
+ DepVal = getRootDependence(DepVal);
+
+ // Is this already a dependence-tainted store?
+ Value* OldDep = getDependence(Address);
+ if (OldDep) {
+    // The address of 'SI' has already been tainted. Just need to absorb
+    // 'DepVal' into the existing dependence in the address of 'SI'.
+ Instruction* AndDep = getAndDependence(Address);
+ IRBuilder<true, NoFolder> Builder(AndDep);
+ Value* NewDep = nullptr;
+ if (DepVal->getType() == AndDep->getType()) {
+ NewDep = Builder.CreateAnd(OldDep, DepVal);
+ } else {
+ NewDep = Builder.CreateAnd(
+ OldDep, createCast(Builder, DepVal, TargetIntegerType));
+ }
+
+
+    // Use the new AND instruction as the dependence value of the existing
+    // taint.
+    AndDep->setOperand(0, NewDep);
+ return true;
+ }
+
+ // SI's address has not been tainted. Now taint it with 'DepVal'.
+ Value* CastDepToInt = createCast(Builder, DepVal, TargetIntegerType);
+ Value* PtrToIntCast = Builder.CreatePtrToInt(Address, TargetIntegerType);
+ Value* AndDepVal =
+ Builder.CreateAnd(CastDepToInt, ConstantInt::get(TargetIntegerType, 0));
+ auto AndInst = dyn_cast<Instruction>(AndDepVal);
+  // XXX-comment: The original IR InstCombiner would change our AND instruction
+  // to a select, and then the back end would optimize the condition out. We
+  // attach a flag to instructions and set it here to inform the InstCombiner
+  // not to touch this AND instruction at all.
+ Value* OrAddr = Builder.CreateOr(AndDepVal, PtrToIntCast);
+ Value* NewAddr = Builder.CreateIntToPtr(OrAddr, Address->getType());
+
+ DEBUG(dbgs() << "[taintStoreAddress]\n"
+ << "Original store: " << *SI << '\n');
+ SI->setOperand(1, NewAddr);
+
+ // Debug output.
+ DEBUG(dbgs() << "\tTargetIntegerType: " << *TargetIntegerType << '\n'
+ << "\tCast dependence value to integer: " << *CastDepToInt
+ << '\n'
+ << "\tCast address to integer: " << *PtrToIntCast << '\n'
+ << "\tAnd dependence value: " << *AndDepVal << '\n'
+ << "\tOr address: " << *OrAddr << '\n'
+ << "\tCast or instruction to address: " << *NewAddr << "\n\n");
+
+ return true;
+}
+
+// Looks for the previous store in the if-block 'BrBB' that makes the
+// speculative store 'StoreToHoist' safe to hoist.
+Value* getSpeculativeStoreInPrevBB(StoreInst* StoreToHoist, BasicBlock* BrBB) {
+ assert(StoreToHoist && "StoreToHoist must be a real store");
+
+ Value* StorePtr = StoreToHoist->getPointerOperand();
+
+ // Look for a store to the same pointer in BrBB.
+ for (BasicBlock::reverse_iterator RI = BrBB->rbegin(), RE = BrBB->rend();
+ RI != RE; ++RI) {
+ Instruction* CurI = &*RI;
+
+ StoreInst* SI = dyn_cast<StoreInst>(CurI);
+    // If we found the previous store, make sure it stores to the same
+    // location.
+    // XXX-update: If the previous store's original untainted address is the
+    // same as 'StorePtr', we are also good to hoist the store.
+    if (SI && (SI->getPointerOperand() == StorePtr ||
+               GetUntaintedAddress(SI->getPointerOperand()) == StorePtr)) {
+      // Found the previous store; return it.
+      return SI;
+ }
+ }
+
+  llvm_unreachable(
+      "We should not reach here since this store is safe to speculate");
+}
+
+// XXX-comment: Returns true if it changes the code, false otherwise (i.e.,
+// the branch condition already depends on 'DepVal').
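+// The taint leaves the branch condition semantically unchanged; the emitted
+// pattern is roughly:
+//   %andDep = and <ty> %DepVal, 0
+//   %truncAndDep = trunc <ty> %andDep to i1
+//   %orCond = or i1 %truncAndDep, %cond
+//   br i1 %orCond, ...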
+bool taintConditionalBranch(BranchInst* BI, Value* DepVal) {
+ assert(BI->isConditional());
+ auto* Cond = BI->getOperand(0);
+ if (dependenceSetInclusion(Cond, DepVal)) {
+ // The dependence/ordering is self-evident.
+ return false;
+ }
+
+ IRBuilder<true, NoFolder> Builder(BI);
+ auto* AndDep =
+ Builder.CreateAnd(DepVal, ConstantInt::get(DepVal->getType(), 0));
+ auto* TruncAndDep =
+ Builder.CreateTrunc(AndDep, IntegerType::get(DepVal->getContext(), 1));
+ auto* OrCond = Builder.CreateOr(TruncAndDep, Cond);
+ BI->setOperand(0, OrCond);
+ return true;
+}
+
+// XXX-update: For a relaxed load 'LI', finds the first immediately following
+// atomic store or conditional branch. Returns that instruction if one is
+// found; returns 'LI' itself if there is none, in which case the only way to
+// enforce the ordering is to make the load 'acquire'.
+Instruction* findFirstStoreCondBranchInst(LoadInst* LI) {
+  // In some situations, relaxed loads can be left as is:
+  // 1. The relaxed load is used to calculate the address of the immediately
+  // following store;
+  // 2. The relaxed load is used as a condition in the immediately following
+  // conditional branch, and there are no stores in between. This is actually
+  // quite common. E.g.,
+ // int r1 = x.load(relaxed);
+ // if (r1 != 0) {
+ // y.store(1, relaxed);
+ // }
+
+ auto* BB = LI->getParent();
+ auto BE = BB->end();
+ auto BBI = BasicBlock::iterator(LI);
+ BBI++;
+ while (true) {
+ for (; BBI != BE; BBI++) {
+      auto* Inst = &*BBI;
+ if (Inst->getOpcode() == Instruction::Store) {
+ return Inst;
+ } else if (Inst->getOpcode() == Instruction::Br) {
+ auto* BrInst = dyn_cast<BranchInst>(Inst);
+ if (BrInst->isConditional()) {
+ return Inst;
+ } else {
+ // Reinitialize iterators with the destination of the unconditional
+ // branch.
+ BB = BrInst->getSuccessor(0);
+ BBI = BB->begin();
+ BE = BB->end();
+ break;
+ }
+ }
+ }
+ if (BBI == BE) {
+ return LI;
+ }
+ }
+}
+
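+// For each monotonic (relaxed) load, either taints the first following store
+// or conditional branch, so that the required ordering is enforced through an
+// artificial data/control dependence, or, when tainting is not possible,
+// upgrades the load to 'acquire'.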
+void taintMonotonicLoads(const SmallVector<LoadInst*, 1>& MonotonicLoadInsts) {
+ for (auto* LI : MonotonicLoadInsts) {
+ auto* FirstInst = findFirstStoreCondBranchInst(LI);
+ if (FirstInst == nullptr) {
+ // No need to worry about the relaxed load.
+ continue;
+ }
+ if (FirstInst == LI) {
+ // We don't seem to be able to taint a following store/conditional branch
+ // instruction. Simply make it acquire.
+ LI->setOrdering(Acquire);
+ continue;
+ }
+    // Taint 'FirstInst', which could be a store or a conditional branch
+    // instruction.
+ if (FirstInst->getOpcode() == Instruction::Store) {
+ taintStoreAddress(dyn_cast<StoreInst>(FirstInst), LI);
+ } else if (FirstInst->getOpcode() == Instruction::Br) {
+ taintConditionalBranch(dyn_cast<BranchInst>(FirstInst), LI);
+ } else {
+ assert(false && "findFirstStoreCondBranchInst() should return a "
+ "store/condition branch instruction");
+ }
+ }
+}
+
+/**** Implementations of public methods for dependence tainting ****/
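+// If 'CurrentAddress' was tainted by taintStoreAddress() (or rewritten into a
+// 'select Cond, Addr, Addr' by CompressTaintedStore()), returns the original
+// untainted address; otherwise, returns 'CurrentAddress' itself.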
+Value* GetUntaintedAddress(Value* CurrentAddress) {
+ auto* OrAddress = getOrAddress(CurrentAddress);
+ if (OrAddress == nullptr) {
+ // Is it tainted by a select instruction?
+ auto* Inst = dyn_cast<Instruction>(CurrentAddress);
+ if (nullptr != Inst && Inst->getOpcode() == Instruction::Select) {
+ // A selection instruction.
+ if (Inst->getOperand(1) == Inst->getOperand(2)) {
+ return Inst->getOperand(1);
+ }
+ }
+
+ return CurrentAddress;
+ }
+
+ auto* CastToInt = dyn_cast<Instruction>(OrAddress->getOperand(1));
+ if (CastToInt && CastToInt->getOpcode() == Instruction::PtrToInt) {
+ return CastToInt->getOperand(0);
+  } else {
+    // This should be a PtrToInt constant expression.
+    ConstantExpr* PtrToIntExpr =
+ dyn_cast<ConstantExpr>(OrAddress->getOperand(1));
+ if (PtrToIntExpr && PtrToIntExpr->getOpcode() == Instruction::PtrToInt) {
+ return PtrToIntExpr->getOperand(0);
+ }
+ }
+
+  // Looks like it has not been dependence-tainted. Return the address itself.
+ return CurrentAddress;
+}
+
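+// Builds the MemoryLocation of 'SI' from its untainted address so that alias
+// analysis sees the location the store actually refers to.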
+MemoryLocation GetUntaintedMemoryLocation(StoreInst* SI) {
+ AAMDNodes AATags;
+ SI->getAAMetadata(AATags);
+ const auto& DL = SI->getModule()->getDataLayout();
+ const auto* OriginalAddr = GetUntaintedAddress(SI->getPointerOperand());
+ DEBUG(if (OriginalAddr != SI->getPointerOperand()) {
+ dbgs() << "[GetUntaintedMemoryLocation]\n"
+ << "Storing address: " << *SI->getPointerOperand()
+ << "\nUntainted address: " << *OriginalAddr << "\n";
+ });
+ return MemoryLocation(OriginalAddr,
+ DL.getTypeStoreSize(SI->getValueOperand()->getType()),
+ AATags);
+}
+
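+// Ensures that 'SI' depends on 'DepVal', tainting the store's address if the
+// dependence does not already exist. Returns true iff the code was changed.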
+bool TaintDependenceToStore(StoreInst* SI, Value* DepVal) {
+ if (dependenceSetInclusion(SI, DepVal)) {
+ return false;
+ }
+
+ bool tainted = taintStoreAddress(SI, DepVal);
+ assert(tainted);
+ return tainted;
+}
+
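+// Like TaintDependenceToStore(), but only requires the address of 'SI' (not
+// the store as a whole) to depend on 'DepVal'.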
+bool TaintDependenceToStoreAddress(StoreInst* SI, Value* DepVal) {
+ if (dependenceSetInclusion(SI->getPointerOperand(), DepVal)) {
+ return false;
+ }
+
+ bool tainted = taintStoreAddress(SI, DepVal);
+ assert(tainted);
+ return tainted;
+}
+
+bool CompressTaintedStore(BasicBlock* BB) {
+  // This function looks for windows of adjacent stores in 'BB' that satisfy
+  // the following conditions (and then performs the optimization):
+  // *Addr(d1) = v1, d1 is a condition and is the only dependence the store's
+  // address depends on && Dep(v1) includes Dep(d1);
+  // *Addr(d2) = v2, d2 is a condition and is the only dependence the store's
+  // address depends on && Dep(v2) includes Dep(d2) &&
+  // Dep(d2) includes Dep(d1);
+  // ...
+  // *Addr(dN) = vN, dN is a condition and is the only dependence the store's
+  // address depends on && Dep(vN) includes Dep(dN) &&
+  // Dep(dN) includes Dep(d(N-1)).
+  //
+  // As a result, Dep(dN) includes the union of Dep(d1) through Dep(d(N-1)),
+  // so we can safely transform the above to the following. In between these
+  // stores, we can omit untainted stores to the same address 'Addr' since
+  // they internally have dependence on the previous stores on the same
+  // address.
+  // =>
+  // *Addr = v1
+  // *Addr = v2
+  // *Addr(dN) = vN
+ for (auto BI = BB->begin(), BE = BB->end(); BI != BE; BI++) {
+    // Look for the first store in such a window of adjacent stores.
+ auto* FirstSI = dyn_cast<StoreInst>(&*BI);
+ if (!FirstSI) {
+ continue;
+ }
+
+ // The first store in the window must be tainted.
+ auto* UntaintedAddress = GetUntaintedAddress(FirstSI->getPointerOperand());
+ if (UntaintedAddress == FirstSI->getPointerOperand()) {
+ continue;
+ }
+
+ // The first store's address must directly depend on and only depend on a
+ // condition.
+ auto* FirstSIDepCond = getConditionDependence(FirstSI->getPointerOperand());
+ if (nullptr == FirstSIDepCond) {
+ continue;
+ }
+
+ // Dep(first store's storing value) includes Dep(tainted dependence).
+ if (!dependenceSetInclusion(FirstSI->getValueOperand(), FirstSIDepCond)) {
+ continue;
+ }
+
+    // Look for subsequent stores to the same address that satisfy the
+    // condition of "compressing the dependence".
+    SmallVector<StoreInst*, 8> AdjacentStores;
+    AdjacentStores.push_back(FirstSI);
+    auto BII = BasicBlock::iterator(FirstSI);
+    for (BII++; BII != BE; BII++) {
+ auto* CurrSI = dyn_cast<StoreInst>(&*BII);
+ if (!CurrSI) {
+ if (BII->mayHaveSideEffects()) {
+ // Be conservative. Instructions with side effects are similar to
+ // stores.
+ break;
+ }
+ continue;
+ }
+
+ auto* OrigAddress = GetUntaintedAddress(CurrSI->getPointerOperand());
+ auto* CurrSIDepCond = getConditionDependence(CurrSI->getPointerOperand());
+      // All other stores must satisfy either:
+      // A. 'CurrSI' is an untainted store to the same address, or
+      // B. the combination of the following 5 subconditions:
+      // 1. Tainted;
+      // 2. Untainted address is the same as the group's address;
+      // 3. The address is tainted with a sole value which is a condition;
+      // 4. The storing value depends on the condition in 3;
+      // 5. The condition in 3 depends on the previous store's dependence
+      // condition.
+
+      // Condition A holds: simply skip this store.
+ if (OrigAddress == CurrSI->getPointerOperand() &&
+ OrigAddress == UntaintedAddress) {
+ continue;
+ }
+ // Check condition B.
+ if (OrigAddress == CurrSI->getPointerOperand() ||
+ OrigAddress != UntaintedAddress || CurrSIDepCond == nullptr ||
+ !dependenceSetInclusion(CurrSI->getValueOperand(), CurrSIDepCond)) {
+ // Check condition 1, 2, 3 & 4.
+ break;
+ }
+
+ // Check condition 5.
+      StoreInst* PrevSI = AdjacentStores.back();
+      auto* PrevSIDepCond = getConditionDependence(PrevSI->getPointerOperand());
+      assert(PrevSIDepCond &&
+             "Store in the group must already depend on a condition");
+ if (!dependenceSetInclusion(CurrSIDepCond, PrevSIDepCond)) {
+ break;
+ }
+
+      AdjacentStores.push_back(CurrSI);
+ }
+
+    if (AdjacentStores.size() == 1) {
+ // The outer loop should keep looking from the next store.
+ continue;
+ }
+
+ // Now we have such a group of tainted stores to the same address.
+ DEBUG(dbgs() << "[CompressTaintedStore]\n");
+ DEBUG(dbgs() << "Original BB\n");
+ DEBUG(dbgs() << *BB << '\n');
+    auto* LastSI = AdjacentStores.back();
+    for (unsigned i = 0; i < AdjacentStores.size() - 1; ++i) {
+      auto* SI = AdjacentStores[i];
+
+ // Use the original address for stores before the last one.
+ SI->setOperand(1, UntaintedAddress);
+
+ DEBUG(dbgs() << "Store address has been reversed: " << *SI << '\n';);
+ }
+ // XXX-comment: Try to make the last store use fewer registers.
+ // If LastSI's storing value is a select based on the condition with which
+ // its address is tainted, transform the tainted address to a select
+ // instruction, as follows:
+ // r1 = Select Cond ? A : B
+ // r2 = Cond & 0
+ // r3 = Addr | r2
+ // *r3 = r1
+ // ==>
+ // r1 = Select Cond ? A : B
+ // r2 = Select Cond ? Addr : Addr
+ // *r2 = r1
+ // The idea is that both Select instructions depend on the same condition,
+ // so hopefully the backend can generate two cmov instructions for them (and
+ // this saves the number of registers needed).
+ auto* LastSIDep = getConditionDependence(LastSI->getPointerOperand());
+ auto* LastSIValue = dyn_cast<Instruction>(LastSI->getValueOperand());
+ if (LastSIValue && LastSIValue->getOpcode() == Instruction::Select &&
+ LastSIValue->getOperand(0) == LastSIDep) {
+ // XXX-comment: Maybe it's better for us to just leave it as an and/or
+ // dependence pattern.
+ // /*
+ IRBuilder<true, NoFolder> Builder(LastSI);
+ auto* Address =
+ Builder.CreateSelect(LastSIDep, UntaintedAddress, UntaintedAddress);
+ LastSI->setOperand(1, Address);
+ DEBUG(dbgs() << "The last store becomes :" << *LastSI << "\n\n";);
+ // */
+ }
+ }
+
+ return true;
+}
+
+bool PassDependenceToStore(Value* OldAddress, StoreInst* NewStore) {
+ Value* OldDep = getDependence(OldAddress);
+ // Return false when there's no dependence to pass from the OldAddress.
+ if (!OldDep) {
+ return false;
+ }
+
+ // No need to pass the dependence to NewStore's address if it already depends
+ // on whatever 'OldAddress' depends on.
+ if (StoreAddressDependOnValue(NewStore, OldDep)) {
+ return false;
+ }
+ return taintStoreAddress(NewStore, OldAddress);
+}
+
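+// Returns the set of leaf values that 'Val' transitively depends on.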
+SmallSet<Value*, 8> FindDependence(Value* Val) {
+ SmallSet<Value*, 8> DepSet;
+ recursivelyFindDependence(&DepSet, Val, true /*Only insert leaf nodes*/);
+ return DepSet;
+}
+
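+// Returns true if the address of 'SI' conservatively depends on 'DepVal'.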
+bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal) {
+ return dependenceSetInclusion(SI->getPointerOperand(), DepVal);
+}
+
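+// Returns true if 'SI' (its address or its stored value) conservatively
+// depends on 'Dep'.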
+bool StoreDependOnValue(StoreInst* SI, Value* Dep) {
+ return dependenceSetInclusion(SI, Dep);
+}
+
+} // namespace
+
bool AtomicExpand::runOnFunction(Function &F) {
if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
return false;
TLI = TM->getSubtargetImpl(F)->getTargetLowering();
SmallVector<Instruction *, 1> AtomicInsts;
+ SmallVector<LoadInst*, 1> MonotonicLoadInsts;
// Changing control-flow while iterating through it is a bad idea, so gather a
// list of all atomic instructions before we start.
if (I->isAtomic()) {
switch (I->getOpcode()) {
case Instruction::AtomicCmpXchg: {
+          // XXX-comment: On AArch64, AtomicCmpXchg is translated to a
+          // conditional branch that already depends on the loaded value, so
+          // we don't need to do anything.
+ /*
auto* CmpXchg = dyn_cast<AtomicCmpXchgInst>(&*I);
auto SuccOrdering = CmpXchg->getSuccessOrdering();
if (SuccOrdering == Monotonic) {
} else if (SuccOrdering == Release) {
CmpXchg->setSuccessOrdering(AcquireRelease);
}
+ */
break;
}
case Instruction::AtomicRMW: {
+ // XXX-comment: Similar to AtomicCmpXchg. These instructions in
+ // AArch64 will be translated to a loop whose condition depends on the
+ // store status, which further depends on the load value.
+ /*
auto* RMW = dyn_cast<AtomicRMWInst>(&*I);
if (RMW->getOrdering() == Monotonic) {
RMW->setOrdering(Acquire);
}
+ */
break;
}
case Instruction::Load: {
auto* LI = dyn_cast<LoadInst>(&*I);
if (LI->getOrdering() == Monotonic) {
+ /*
+ DEBUG(dbgs() << "Transforming relaxed loads to acquire loads: "
+ << *LI << '\n');
LI->setOrdering(Acquire);
+ */
+ MonotonicLoadInsts.push_back(LI);
}
break;
}
MadeChange |= expandAtomicCmpXchg(CASI);
}
}
+
+ taintMonotonicLoads(MonotonicLoadInsts);
+
return MadeChange;
}