--- /dev/null
+//===- TaintRelaxedAtomicsUtil.cpp - Utils for tainting relaxed atomics --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TaintRelaxedAtomicsUtils.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/NoFolder.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Statepoint.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/BuildLibCalls.h"
+#include "llvm/Transforms/Utils/BypassSlowDivision.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
+using namespace llvm;
+using namespace llvm::PatternMatch;
+
+#define DEBUG_TYPE "taintrelaxedatomics"
+
+namespace llvm {
+
+// The depth we trace down a variable to look for its dependence set.
+const unsigned kDependenceDepth = 4;
+
+// Recursively looks for variables that 'Val' depends on at the given depth
+// 'Depth', and adds them in 'DepSet'. If 'InsertOnlyLeafNodes' is true, only
+// inserts the leaf node values; otherwise, all visited nodes are included in
+// 'DepSet'. Note that constants will be ignored.
+template <typename SetType>
+void recursivelyFindDependence(SetType* DepSet, Value* Val,
+ bool InsertOnlyLeafNodes = false,
+ unsigned Depth = kDependenceDepth) {
+ if (Val == nullptr) {
+ return;
+ }
+ if (!InsertOnlyLeafNodes && !isa<Constant>(Val)) {
+ DepSet->insert(Val);
+ }
+ if (Depth == 0) {
+ // Cannot go deeper. Insert the leaf nodes.
+ if (InsertOnlyLeafNodes && !isa<Constant>(Val)) {
+ DepSet->insert(Val);
+ }
+ return;
+ }
+
+ // Go one step further to explore the dependence of the operands.
+ Instruction* I = nullptr;
+ if ((I = dyn_cast<Instruction>(Val))) {
+ if (isa<LoadInst>(I)) {
+      // A load is considered the leaf load of the dependence tree. Done.
+ DepSet->insert(Val);
+ return;
+ } else if (I->isBinaryOp()) {
+ BinaryOperator* I = dyn_cast<BinaryOperator>(Val);
+ Value *Op0 = I->getOperand(0), *Op1 = I->getOperand(1);
+ recursivelyFindDependence(DepSet, Op0, InsertOnlyLeafNodes, Depth - 1);
+ recursivelyFindDependence(DepSet, Op1, InsertOnlyLeafNodes, Depth - 1);
+ } else if (I->isCast()) {
+ Value* Op0 = I->getOperand(0);
+ recursivelyFindDependence(DepSet, Op0, InsertOnlyLeafNodes, Depth - 1);
+ } else if (I->getOpcode() == Instruction::Select) {
+ Value* Op0 = I->getOperand(0);
+ Value* Op1 = I->getOperand(1);
+ Value* Op2 = I->getOperand(2);
+ recursivelyFindDependence(DepSet, Op0, InsertOnlyLeafNodes, Depth - 1);
+ recursivelyFindDependence(DepSet, Op1, InsertOnlyLeafNodes, Depth - 1);
+ recursivelyFindDependence(DepSet, Op2, InsertOnlyLeafNodes, Depth - 1);
+ } else if (I->getOpcode() == Instruction::GetElementPtr) {
+ for (unsigned i = 0; i < I->getNumOperands(); i++) {
+ recursivelyFindDependence(DepSet, I->getOperand(i), InsertOnlyLeafNodes,
+ Depth - 1);
+ }
+ } else if (I->getOpcode() == Instruction::Store) {
+ auto* SI = dyn_cast<StoreInst>(Val);
+ recursivelyFindDependence(DepSet, SI->getPointerOperand(),
+ InsertOnlyLeafNodes, Depth - 1);
+ recursivelyFindDependence(DepSet, SI->getValueOperand(),
+ InsertOnlyLeafNodes, Depth - 1);
+ } else {
+ Value* Op0 = nullptr;
+ Value* Op1 = nullptr;
+ switch (I->getOpcode()) {
+ case Instruction::ICmp:
+ case Instruction::FCmp: {
+ Op0 = I->getOperand(0);
+ Op1 = I->getOperand(1);
+ recursivelyFindDependence(DepSet, Op0, InsertOnlyLeafNodes,
+ Depth - 1);
+ recursivelyFindDependence(DepSet, Op1, InsertOnlyLeafNodes,
+ Depth - 1);
+ break;
+ }
+ case Instruction::PHI: {
+ for (unsigned i = 0; i < I->getNumOperands(); i++) {
+ auto* op = I->getOperand(i);
+ if (DepSet->count(op) == 0) {
+ recursivelyFindDependence(DepSet, I->getOperand(i),
+ InsertOnlyLeafNodes, Depth - 1);
+ }
+ }
+ break;
+ }
+ default: {
+ // Be conservative. Add it and be done with it.
+ DepSet->insert(Val);
+ return;
+ }
+ }
+ }
+ } else if (isa<Constant>(Val)) {
+ // Not interested in constant values. Done.
+ return;
+ } else {
+ // Be conservative. Add it and be done with it.
+ DepSet->insert(Val);
+ return;
+ }
+}
+
+// Helper function to create a Cast instruction.
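+// For illustration (a sketch; value names are made up): casting an i1 '%dep'
+// to i64 emits "zext i1 %dep to i64", casting an i64 down to i32 emits a
+// trunc, and casting a pointer '%p' emits "ptrtoint i32* %p to i64".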
+template <typename BuilderTy>
+Value* createCast(BuilderTy& Builder, Value* DepVal,
+ Type* TargetIntegerType) {
+ Instruction::CastOps CastOp = Instruction::BitCast;
+ switch (DepVal->getType()->getTypeID()) {
+ case Type::IntegerTyID: {
+ assert(TargetIntegerType->getTypeID() == Type::IntegerTyID);
+ auto* FromType = dyn_cast<IntegerType>(DepVal->getType());
+ auto* ToType = dyn_cast<IntegerType>(TargetIntegerType);
+ assert(FromType && ToType);
+ if (FromType->getBitWidth() <= ToType->getBitWidth()) {
+ CastOp = Instruction::ZExt;
+ } else {
+ CastOp = Instruction::Trunc;
+ }
+ break;
+ }
+ case Type::FloatTyID:
+ case Type::DoubleTyID: {
+ CastOp = Instruction::FPToSI;
+ break;
+ }
+ case Type::PointerTyID: {
+ CastOp = Instruction::PtrToInt;
+ break;
+ }
+ default: { break; }
+ }
+
+ return Builder.CreateCast(CastOp, DepVal, TargetIntegerType);
+}
+
+// Given a value, if it's a tainted address, this function returns the
+// instruction that ORs the "dependence value" with the "original address".
+// Otherwise, returns nullptr. This instruction is the first OR instruction
+// where one of its operands is an AND instruction with an operand being 0.
+//
+// E.g., it returns '%4 = or i32 %3, %2' given 'CurrentAddress' is '%5'.
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+Instruction* getOrAddress(Value* CurrentAddress) {
+  // Is it a cast from integer to pointer type?
+ Instruction* OrAddress = nullptr;
+ Instruction* AndDep = nullptr;
+ Constant* ZeroConst = nullptr;
+
+ const Instruction* CastToPtr = dyn_cast<Instruction>(CurrentAddress);
+ if (CastToPtr && CastToPtr->getOpcode() == Instruction::IntToPtr) {
+ // Is it an OR instruction: %1 = or %and, %actualAddress.
+ if ((OrAddress = dyn_cast<Instruction>(CastToPtr->getOperand(0))) &&
+ OrAddress->getOpcode() == Instruction::Or) {
+      // The first operand should be an AND instruction.
+ AndDep = dyn_cast<Instruction>(OrAddress->getOperand(0));
+ if (AndDep && AndDep->getOpcode() == Instruction::And) {
+        // Also make sure the second operand of the "AND" is 0, or the "AND" is
+        // marked explicitly by "NoInstCombine".
+ if ((ZeroConst = dyn_cast<Constant>(AndDep->getOperand(1))) &&
+ ZeroConst->isNullValue()) {
+ return OrAddress;
+ }
+ }
+ }
+ }
+ // Looks like it's not been tainted.
+ return nullptr;
+}
+
+// Given a value, if it's a tainted address, this function returns the
+// instruction that taints the "dependence value". Otherwise, returns nullptr.
+// This instruction is the last AND instruction where one of its operands is 0.
+// E.g., it returns '%3' given 'CurrentAddress' is '%5'.
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+Instruction* getAndDependence(Value* CurrentAddress) {
+ // If 'CurrentAddress' is tainted, get the OR instruction.
+ auto* OrAddress = getOrAddress(CurrentAddress);
+ if (OrAddress == nullptr) {
+ return nullptr;
+ }
+
+ // No need to check the operands.
+ auto* AndDepInst = dyn_cast<Instruction>(OrAddress->getOperand(0));
+ assert(AndDepInst);
+ return AndDepInst;
+}
+
+// Given a value, if it's a tainted address, this function returns
+// the "dependence value", which is the first operand in the AND instruction.
+// E.g., it returns '%1' given 'CurrentAddress' is '%5'.
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+Value* getDependence(Value* CurrentAddress) {
+ auto* AndInst = getAndDependence(CurrentAddress);
+ if (AndInst == nullptr) {
+ return nullptr;
+ }
+ return AndInst->getOperand(0);
+}
+
+// Given an address that has been tainted, returns the only condition it depends
+// on, if any; otherwise, returns nullptr.
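+// E.g., in the IR example above, getConditionDependence(%5) returns '%cmp':
+// the dependence value is '%1 = sext i1 %cmp to i32', and walking through the
+// single-operand cast reaches the icmp.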
+Value* getConditionDependence(Value* Address) {
+ auto* Dep = getDependence(Address);
+ if (Dep == nullptr) {
+ // 'Address' has not been dependence-tainted.
+ return nullptr;
+ }
+
+ Value* Operand = Dep;
+ while (true) {
+ auto* Inst = dyn_cast<Instruction>(Operand);
+ if (Inst == nullptr) {
+ // Non-instruction type does not have condition dependence.
+ return nullptr;
+ }
+ if (Inst->getOpcode() == Instruction::ICmp) {
+ return Inst;
+ } else {
+ if (Inst->getNumOperands() != 1) {
+ return nullptr;
+ } else {
+ Operand = Inst->getOperand(0);
+ }
+ }
+ }
+}
+
+// Conservatively decides whether the dependence set of 'Val1' includes the
+// dependence set of 'Val2'. 'Val1ExpandLevel' and 'Val2ExpandLevel' bound how
+// deeply each value is expanded; 'Val2' is expanded to its leaf dependences
+// only.
+// If it returns true, it means the dependence set of 'Val1' includes that of
+// 'Val2'; otherwise, it only means we cannot conclusively decide it.
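+// E.g., for the tainted-address IR example above,
+// dependenceSetInclusion(%5, %cmp) returns true: the only leaf dependence of
+// '%cmp' is the load '%0', which also appears in the expanded dependence set
+// of '%5'.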
+bool dependenceSetInclusion(Value* Val1, Value* Val2,
+ int Val1ExpandLevel = 2 * kDependenceDepth,
+ int Val2ExpandLevel = kDependenceDepth) {
+ typedef SmallSet<Value*, 8> IncludingSet;
+ typedef SmallSet<Value*, 4> IncludedSet;
+
+ IncludingSet DepSet1;
+ IncludedSet DepSet2;
+ // Look for more depths for the including set.
+ recursivelyFindDependence(&DepSet1, Val1, false /*Insert all visited nodes*/,
+ Val1ExpandLevel);
+ recursivelyFindDependence(&DepSet2, Val2, true /*Only insert leaf nodes*/,
+ Val2ExpandLevel);
+
+  auto set_inclusion = [](const IncludingSet& FullSet,
+                          const IncludedSet& Subset) {
+ for (auto* Dep : Subset) {
+ if (0 == FullSet.count(Dep)) {
+ return false;
+ }
+ }
+ return true;
+ };
+ bool inclusion = set_inclusion(DepSet1, DepSet2);
+ DEBUG(dbgs() << "[dependenceSetInclusion]: " << inclusion << "\n");
+ DEBUG(dbgs() << "Including set for: " << *Val1 << "\n");
+ DEBUG(for (const auto* Dep : DepSet1) { dbgs() << "\t\t" << *Dep << "\n"; });
+ DEBUG(dbgs() << "Included set for: " << *Val2 << "\n");
+ DEBUG(for (const auto* Dep : DepSet2) { dbgs() << "\t\t" << *Dep << "\n"; });
+
+ return inclusion;
+}
+
+// Recursively iterates through the operands spawned from 'DepVal'. If there
+// exists a single value that 'DepVal' only depends on, we call that value the
+// root dependence of 'DepVal' and return it. Otherwise, return 'DepVal'.
+Value* getRootDependence(Value* DepVal) {
+ SmallSet<Value*, 8> DepSet;
+ for (unsigned depth = kDependenceDepth; depth > 0; --depth) {
+ recursivelyFindDependence(&DepSet, DepVal, true /*Only insert leaf nodes*/,
+ depth);
+ if (DepSet.size() == 1) {
+ return *DepSet.begin();
+ }
+ DepSet.clear();
+ }
+ return DepVal;
+}
+
+// This function taints the address of 'SI' with 'DepVal'. If the address of
+// 'SI' already depends on whatever 'DepVal' depends on, this function does
+// nothing and returns false. Otherwise, returns true.
+//
+// This effect forces the store and any stores that come later to depend on
+// 'DepVal'. For example, we have a condition "cond", and a store instruction
+// "s: STORE addr, val". If we want "s" (and any later store) to depend on
+// "cond", we do the following:
+// %conv = sext i1 %cond to i32
+// %addrVal = ptrtoint i32* %addr to i32
+// %andCond = and i32 %conv, 0
+// %orAddr = or i32 %andCond, %addrVal
+// %NewAddr = inttoptr i32 %orAddr to i32*
+//
+// This is a more concrete example:
+// ------
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+bool taintStoreAddress(StoreInst* SI, Value* DepVal) {
+ // Set the insertion point right after the 'DepVal'.
+ IRBuilder<true, NoFolder> Builder(SI);
+ BasicBlock* BB = SI->getParent();
+ Value* Address = SI->getPointerOperand();
+ Type* TargetIntegerType =
+ IntegerType::get(Address->getContext(),
+ BB->getModule()->getDataLayout().getPointerSizeInBits());
+
+  // Does SI's address already depend on whatever 'DepVal' depends on?
+ if (StoreAddressDependOnValue(SI, DepVal)) {
+ return false;
+ }
+
+ // Figure out if there's a root variable 'DepVal' depends on. For example, we
+ // can extract "getelementptr inbounds %struct, %struct* %0, i64 0, i32 123"
+ // to be "%struct* %0" since all other operands are constant.
+ auto* RootVal = getRootDependence(DepVal);
+ auto* RootInst = dyn_cast<Instruction>(RootVal);
+ auto* DepValInst = dyn_cast<Instruction>(DepVal);
+ if (RootInst && DepValInst &&
+ RootInst->getParent() == DepValInst->getParent()) {
+ DepVal = RootVal;
+ }
+
+ // Is this already a dependence-tainted store?
+ Value* OldDep = getDependence(Address);
+ if (OldDep) {
+    // The address of 'SI' has already been tainted. Just absorb 'DepVal' into
+    // the existing dependence in the address of 'SI'.
+ Instruction* AndDep = getAndDependence(Address);
+ IRBuilder<true, NoFolder> Builder(AndDep);
+ Value* NewDep = nullptr;
+ if (DepVal->getType() == AndDep->getType()) {
+ NewDep = Builder.CreateAnd(OldDep, DepVal);
+ } else {
+ NewDep = Builder.CreateAnd(
+ OldDep, createCast(Builder, DepVal, TargetIntegerType));
+ }
+
+ // Use the new AND instruction as the dependence
+ AndDep->setOperand(0, NewDep);
+ return true;
+ }
+
+ // SI's address has not been tainted. Now taint it with 'DepVal'.
+ Value* CastDepToInt = createCast(Builder, DepVal, TargetIntegerType);
+ Value* PtrToIntCast = Builder.CreatePtrToInt(Address, TargetIntegerType);
+ Value* AndDepVal =
+ Builder.CreateAnd(CastDepToInt, ConstantInt::get(TargetIntegerType, 0));
+  // XXX-comment: The original IR InstCombiner would change our 'and' instruction
+  // to a select and then the backend would optimize the condition out. We attach
+  // a flag to instructions and set it here to inform the InstCombiner not to
+  // touch this 'and' instruction at all.
+ Value* OrAddr = Builder.CreateOr(AndDepVal, PtrToIntCast);
+ Value* NewAddr = Builder.CreateIntToPtr(OrAddr, Address->getType());
+
+ DEBUG(dbgs() << "[taintStoreAddress]\n"
+ << "Original store: " << *SI << '\n');
+ SI->setOperand(1, NewAddr);
+
+ // Debug output.
+ DEBUG(dbgs() << "\tTargetIntegerType: " << *TargetIntegerType << '\n'
+ << "\tCast dependence value to integer: " << *CastDepToInt
+ << '\n'
+ << "\tCast address to integer: " << *PtrToIntCast << '\n'
+ << "\tAnd dependence value: " << *AndDepVal << '\n'
+ << "\tOr address: " << *OrAddr << '\n'
+ << "\tCast or instruction to address: " << *NewAddr << "\n\n");
+
+ return true;
+}
+
+// Given 'LoadPart', the load-part result of an RMW operation, taints the
+// store-exclusive address 'Address' with it and returns the tainted address.
+Value* taintRMWStoreAddressWithLoadPart(IRBuilder<>& Builder, Value* Address, Instruction* LoadPart) {
+ auto* BB = LoadPart->getParent();
+
+ Type* TargetIntegerType =
+ IntegerType::get(Address->getContext(),
+ BB->getModule()->getDataLayout().getPointerSizeInBits());
+
+  // Taint 'Address' with the load-part result 'LoadPart'.
+ Value* CastDepToInt = createCast(Builder, LoadPart, TargetIntegerType);
+ Value* PtrToIntCast = Builder.CreatePtrToInt(Address, TargetIntegerType);
+ Value* AndDepVal =
+ Builder.CreateAnd(CastDepToInt, ConstantInt::get(TargetIntegerType, 0));
+ Value* OrAddr = Builder.CreateOr(AndDepVal, PtrToIntCast);
+ Value* NewAddr = Builder.CreateIntToPtr(OrAddr, Address->getType());
+
+ DEBUG(dbgs() << "[taintStoreAddressOfRMWStorePart]\n"
+ << "Original address: " << *Address << '\n');
+
+ // Debug output.
+ DEBUG(dbgs() << "\tTargetIntegerType: " << *TargetIntegerType << '\n'
+ << "\tCast dependence value to integer: " << *CastDepToInt
+ << '\n'
+ << "\tCast address to integer: " << *PtrToIntCast << '\n'
+ << "\tAnd dependence value: " << *AndDepVal << '\n'
+ << "\tOr address: " << *OrAddr << '\n'
+ << "\tCast or instruction to address: " << *NewAddr << "\n\n");
+
+ return NewAddr;
+}
+
+// Looks for the previous store in the 'if' block 'BrBB' that makes the
+// speculative store 'StoreToHoist' safe.
+Value* getSpeculativeStoreInPrevBB(StoreInst* StoreToHoist, BasicBlock* BrBB) {
+ assert(StoreToHoist && "StoreToHoist must be a real store");
+
+ Value* StorePtr = StoreToHoist->getPointerOperand();
+
+ // Look for a store to the same pointer in BrBB.
+ for (BasicBlock::reverse_iterator RI = BrBB->rbegin(), RE = BrBB->rend();
+ RI != RE; ++RI) {
+ Instruction* CurI = &*RI;
+
+ StoreInst* SI = dyn_cast<StoreInst>(CurI);
+    // Found a previous store; make sure it stores to the same location.
+    // XXX-update: If the previous store's original untainted address is the
+    // same as 'StorePtr', we are also good to hoist the store.
+ if (SI && (SI->getPointerOperand() == StorePtr ||
+ GetUntaintedAddress(SI->getPointerOperand()) == StorePtr)) {
+ // Found the previous store, return its value operand.
+ return SI;
+ }
+ }
+
+  assert(false &&
+         "We should not reach here since this store is safe to speculate");
+  return nullptr;
+}
+
+// XXX-comment: Returns true if it changes the code, and false otherwise (i.e.,
+// the branch condition already depends on 'DepVal').
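+// For illustration (a sketch; value names are made up), tainting
+// "br i1 %cond, ..." with an i32 dependence value '%dep' emits
+//   %taint = and i32 %dep, 0
+//   %taint1 = trunc i32 %taint to i1
+//   %newcond = or i1 %taint1, %cond
+// and rewrites the branch to use '%newcond'.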
+bool taintConditionalBranch(BranchInst* BI, Value* DepVal) {
+ assert(BI->isConditional());
+ auto* Cond = BI->getOperand(0);
+ if (dependenceSetInclusion(Cond, DepVal)) {
+ // The dependence/ordering is self-evident.
+ return false;
+ }
+
+ IRBuilder<true, NoFolder> Builder(BI);
+ auto* AndDep =
+ Builder.CreateAnd(DepVal, ConstantInt::get(DepVal->getType(), 0));
+ auto* TruncAndDep =
+ Builder.CreateTrunc(AndDep, IntegerType::get(DepVal->getContext(), 1));
+ auto* OrCond = Builder.CreateOr(TruncAndDep, Cond);
+ BI->setOperand(0, OrCond);
+
+ // Debug output.
+ DEBUG(dbgs() << "\tTainted branch condition:\n" << *BI->getParent());
+
+ return true;
+}
+
+bool ConditionalBranchDependsOnValue(BranchInst* BI, Value* DepVal) {
+ assert(BI->isConditional());
+ auto* Cond = BI->getOperand(0);
+ return dependenceSetInclusion(Cond, DepVal);
+}
+
+// XXX-update: For an instruction (e.g., a relaxed load) 'Inst', finds the first
+// immediately following store (or store-exclusive intrinsic) or conditional
+// branch. Returns nullptr if there is no such store/branch instruction, in
+// which case we can only enforce the load by making it 'acquire'. 'ChainedBB'
+// collects all the blocks chained together by unconditional branches from
+// Inst's block to the block with the first store/cond branch.
+template <typename Vector>
+Instruction* findFirstStoreCondBranchInst(Instruction* Inst, Vector* ChainedBB) {
+ // In some situations, relaxed loads can be left as is:
+  // 1. The relaxed load is used to calculate the address of the immediately
+  //    following store;
+  // 2. The relaxed load is used as the condition of the immediately following
+  //    conditional branch, and there are no stores in between. This is quite
+ // common. E.g.,
+ // int r1 = x.load(relaxed);
+ // if (r1 != 0) {
+ // y.store(1, relaxed);
+ // }
+  // However, in this function, we don't deal with them directly. Instead, we
+  // just find the immediately following store/conditional branch and return it.
+
+ assert(ChainedBB != nullptr && "Chained BB should not be nullptr");
+ auto* BB = Inst->getParent();
+ ChainedBB->push_back(BB);
+ auto BE = BB->end();
+ auto BBI = BasicBlock::iterator(Inst);
+ BBI++;
+ while (true) {
+ for (; BBI != BE; BBI++) {
+ Instruction* Inst = &*BBI;
+ IntrinsicInst* II = dyn_cast<IntrinsicInst>(&*BBI);
+ if (II && II->getIntrinsicID() == Intrinsic::aarch64_stlxr) {
+ return II;
+ } else if (Inst->getOpcode() == Instruction::Store) {
+ return Inst;
+ } else if (Inst->getOpcode() == Instruction::Br) {
+ auto* BrInst = dyn_cast<BranchInst>(Inst);
+ if (BrInst->isConditional()) {
+ return Inst;
+ } else {
+ // Reinitialize iterators with the destination of the unconditional
+ // branch.
+ BB = BrInst->getSuccessor(0);
+ ChainedBB->push_back(BB);
+ BBI = BB->begin();
+ BE = BB->end();
+ break;
+ }
+ }
+ }
+ if (BBI == BE) {
+ return nullptr;
+ }
+ }
+}
+
+// XXX-update: Find the next node of the last relaxed load from 'FromInst' to
+// 'ToInst'. If none, return 'ToInst'.
+Instruction* findLastLoadNext(Instruction* FromInst, Instruction* ToInst) {
+ if (FromInst == ToInst) {
+ return ToInst;
+ }
+ Instruction* LastLoad = ToInst;
+ auto* BB = FromInst->getParent();
+ auto BE = BB->end();
+ auto BBI = BasicBlock::iterator(FromInst);
+ BBI++;
+ for (; BBI != BE && &*BBI != ToInst; BBI++) {
+ auto* LI = dyn_cast<LoadInst>(&*BBI);
+ if (LI == nullptr || !LI->isAtomic() || LI->getOrdering() != Monotonic) {
+ continue;
+ }
+    LastLoad = LI->getNextNode();
+ }
+ return LastLoad;
+}
+
+// Inserts a fake conditional branch right after the instruction 'SplitInst',
+// and the branch condition is 'Condition'. 'SplitInst' will be placed in the
+// newly created block.
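+// For illustration (a sketch; block names follow the setName calls below):
+// splitting at 'SplitInst' with condition '%c' produces
+//   br i1 %c, label %<BB>Then.Fake, label %<BB>Else.Fake
+// where both fake blocks branch unconditionally to a new tail block that now
+// holds 'SplitInst' and the rest of the original block.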
+void AddFakeConditionalBranch(Instruction* SplitInst, Value* Condition) {
+ auto* BB = SplitInst->getParent();
+ TerminatorInst* ThenTerm = nullptr;
+ TerminatorInst* ElseTerm = nullptr;
+ SplitBlockAndInsertIfThenElse(Condition, SplitInst, &ThenTerm, &ElseTerm);
+  assert(ThenTerm && ElseTerm &&
+         "Then/Else terminators cannot be empty after basic block splitting");
+ auto* ThenBB = ThenTerm->getParent();
+ auto* ElseBB = ElseTerm->getParent();
+ auto* TailBB = ThenBB->getSingleSuccessor();
+  assert(TailBB && "Tail block cannot be empty after basic block splitting");
+
+  ThenBB->disableCanEliminateBlock();
+  ElseBB->disableCanEliminateBlock();
+  TailBB->disableCanEliminateBlock();
+ ThenBB->setName(BB->getName() + "Then.Fake");
+ ElseBB->setName(BB->getName() + "Else.Fake");
+ DEBUG(dbgs() << "Add fake conditional branch:\n"
+ << "Then Block:\n"
+ << *ThenBB << "Else Block:\n"
+ << *ElseBB << "\n");
+}
+
+// Taints the result of 'UsageInst' with a fake conditional branch inserted at
+// 'InsertPoint' (or merged into an already-added fake branch there). If
+// 'InsertPoint' is null, the insertion point is right after 'UsageInst'.
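+// For illustration (a sketch; value names are made up), when no existing fake
+// branch is found at 'InsertPoint', this emits
+//   %t = and i64 %usage, 0
+//   %fakecond = icmp ne i64 %t, 0
+// and then calls AddFakeConditionalBranch() to branch on '%fakecond'.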
+void TaintRelaxedLoads(Instruction* UsageInst, Instruction* InsertPoint) {
+  // For better performance, we can add an "AND X, 0" instruction before the
+  // condition.
+ auto* BB = UsageInst->getParent();
+ if (InsertPoint == nullptr) {
+ InsertPoint = UsageInst->getNextNode();
+ }
+ // Insert instructions after PHI nodes.
+ while (dyn_cast<PHINode>(InsertPoint)) {
+ InsertPoint = InsertPoint->getNextNode();
+ }
+ // First thing is to cast 'UsageInst' to an integer type if necessary.
+ Value* AndTarget = nullptr;
+ Type* TargetIntegerType =
+ IntegerType::get(UsageInst->getContext(),
+ BB->getModule()->getDataLayout().getPointerSizeInBits());
+
+  // Check whether 'InsertPoint' is an already-added fake conditional branch.
+ BranchInst* BI = nullptr;
+ if ((BI = dyn_cast<BranchInst>(InsertPoint)) && BI->isConditional()) {
+ auto* Cond = dyn_cast<Instruction>(BI->getOperand(0));
+ if (Cond && Cond->getOpcode() == Instruction::ICmp) {
+ auto* CmpInst = dyn_cast<ICmpInst>(Cond);
+ auto* Op0 = dyn_cast<Instruction>(Cond->getOperand(0));
+ auto* Op1 = dyn_cast<ConstantInt>(Cond->getOperand(1));
+ // %tmp = And X, 0
+ // %cmp = ICMP_NE %tmp, 0
+ // Br %cmp
+ // =>
+ // %tmp1 = And X, NewTaintedVal
+ // %tmp2 = And %tmp1, 0
+ // %cmp = ICMP_NE %tmp2, 0
+ // Br %cmp
+ if (CmpInst && CmpInst->getPredicate() == CmpInst::ICMP_NE && Op0 &&
+ Op0->getOpcode() == Instruction::And && Op1 && Op1->isZero()) {
+ auto* Op01 = dyn_cast<ConstantInt>(Op0->getOperand(1));
+ if (Op01 && Op01->isZero()) {
+ // Now we have a previously added fake cond branch.
+ auto* Op00 = Op0->getOperand(0);
+ IRBuilder<true, NoFolder> Builder(CmpInst);
+ if (Op00->getType() == UsageInst->getType()) {
+ AndTarget = UsageInst;
+ } else {
+ AndTarget = createCast(Builder, UsageInst, Op00->getType());
+ }
+ AndTarget = Builder.CreateAnd(Op00, AndTarget);
+ auto* AndZero = dyn_cast<Instruction>(Builder.CreateAnd(
+ AndTarget, Constant::getNullValue(AndTarget->getType())));
+ CmpInst->setOperand(0, AndZero);
+ return;
+ }
+ }
+ }
+ }
+
+ IRBuilder<true, NoFolder> Builder(InsertPoint);
+ if (IntegerType::classof(UsageInst->getType())) {
+ AndTarget = UsageInst;
+ } else {
+ AndTarget = createCast(Builder, UsageInst, TargetIntegerType);
+ }
+ auto* AndZero = dyn_cast<Instruction>(
+ Builder.CreateAnd(AndTarget, Constant::getNullValue(AndTarget->getType())));
+ auto* FakeCondition = dyn_cast<Instruction>(Builder.CreateICmp(
+ CmpInst::ICMP_NE, AndZero, Constant::getNullValue(AndTarget->getType())));
+ AddFakeConditionalBranch(FakeCondition->getNextNode(), FakeCondition);
+}
+
+// Taints 'Inst' (i.e., adds a fake conditional block that uses 'Inst') at the
+// beginning of basic block 'BB'. Since multiple predecessors may reach 'BB', we
+// first add an appropriate PHI node for 'Inst' and then taint the PHI node.
+void TaintAtBlockBeginning(Instruction* Inst, BasicBlock* BB) {
+ auto* CurBB = Inst->getParent();
+ auto* FirstInst = BB->getFirstNonPHI();
+ IRBuilder<true, NoFolder> Builder(FirstInst);
+ auto* Phi = Builder.CreatePHI(Inst->getType(), 0, Inst->getName() + ".phi");
+ // Multiple blocks going to BB. We should add a PHI node w.r.t. 'Inst'.
+ for (auto* Pred : predecessors(BB)) {
+ Value* Val = nullptr;
+ if (Pred == CurBB) {
+ Val = Inst;
+ } else {
+      // We don't care what the values from other paths are.
+ Val = UndefValue::get(Inst->getType());
+ }
+ Phi->addIncoming(Val, Pred);
+ }
+ return TaintRelaxedLoads(Phi, Phi);
+}
+
+// XXX-comment: Finds the appropriate Value derived from an atomic load.
+// 'ChainedBB' contains all the blocks chained together with unconditional
+// branches from LI's parent BB to the block with the first store/cond branch.
+// If we don't find any, it means 'LI' is not used at all (which should not
+// happen in practice). We can simply set 'LI' to be acquire just to be safe.
+template <typename Vector>
+Instruction* findMostRecentDependenceUsage(LoadInst* LI, Instruction* LaterInst,
+ Vector* ChainedBB,
+ DominatorTree* DT) {
+ typedef SmallSet<Instruction*, 8> UsageSet;
+ typedef DenseMap<BasicBlock*, std::unique_ptr<UsageSet>> UsageMap;
+ assert(ChainedBB->size() >= 1 && "ChainedBB must have >=1 size");
+ // Mapping from basic block in 'ChainedBB' to the set of dependence usage of
+ // 'LI' in each block.
+ UsageMap usage_map;
+ auto* LoadBB = LI->getParent();
+ usage_map[LoadBB] = make_unique<UsageSet>();
+ usage_map[LoadBB]->insert(LI);
+
+ for (auto* BB : *ChainedBB) {
+ if (usage_map[BB] == nullptr) {
+ usage_map[BB] = make_unique<UsageSet>();
+ }
+ auto& usage_set = usage_map[BB];
+ if (usage_set->size() == 0) {
+ // The value has not been used.
+ return nullptr;
+ }
+ // Calculate the usage in the current BB first.
+ std::list<Value*> bb_usage_list;
+ std::copy(usage_set->begin(), usage_set->end(),
+ std::back_inserter(bb_usage_list));
+ for (auto list_iter = bb_usage_list.begin();
+ list_iter != bb_usage_list.end(); list_iter++) {
+ auto* val = *list_iter;
+ for (auto* U : val->users()) {
+ Instruction* Inst = nullptr;
+ if (!(Inst = dyn_cast<Instruction>(U))) {
+ continue;
+ }
+ assert(Inst && "Usage value must be an instruction");
+ auto iter =
+ std::find(ChainedBB->begin(), ChainedBB->end(), Inst->getParent());
+ if (iter == ChainedBB->end()) {
+ // Only care about usage within ChainedBB.
+ continue;
+ }
+ auto* UsageBB = *iter;
+ if (UsageBB == BB) {
+ // Current BB.
+ if (!usage_set->count(Inst)) {
+ bb_usage_list.push_back(Inst);
+ usage_set->insert(Inst);
+ }
+ } else {
+ // A following BB.
+ if (usage_map[UsageBB] == nullptr) {
+ usage_map[UsageBB] = make_unique<UsageSet>();
+ }
+ usage_map[UsageBB]->insert(Inst);
+ }
+ }
+ }
+ }
+
+ // Pick one usage that is in LaterInst's block and that dominates 'LaterInst'.
+ auto* LaterBB = LaterInst->getParent();
+ auto& usage_set = usage_map[LaterBB];
+ Instruction* usage_inst = nullptr;
+ for (auto* inst : *usage_set) {
+ if (DT->dominates(inst, LaterInst)) {
+ usage_inst = inst;
+ break;
+ }
+ }
+
+  assert(usage_inst && "There must be a usage instruction in the same block "
+                       "that dominates the later instruction");
+ return usage_inst;
+}
+
+// XXX-comment: For an instruction (e.g., a load) 'Inst' and the first upcoming
+// store/conditional branch instruction 'FirstInst', returns whether extra
+// ordering constraints are needed, i.e., whether there is no intermediate
+// instruction I (up to and including 'FirstInst') that satisfies either:
+// 1. I is a store, or an atomic load, whose address depends on 'Inst'; or
+// 2. I is a conditional branch whose condition depends on 'Inst'.
+// Note that 'Inst' and 'FirstInst' can be in different basic blocks, as long as
+// Inst's basic block unconditionally jumps (possibly in steps) to FirstInst's
+// block.
+bool NeedExtraConstraints(Instruction* Inst, Instruction* FirstInst) {
+ if (!FirstInst) {
+ return true;
+ }
+ auto BBI = Inst->getIterator();
+ BBI++;
+ while (true) {
+ auto* I = &*BBI;
+ BBI++;
+ BranchInst *BI = dyn_cast<BranchInst>(I);
+ if (BI && BI->isUnconditional()) {
+ BasicBlock *DestBB = BI->getSuccessor(0);
+ BBI = DestBB->begin();
+ continue;
+ }
+
+ if (I->getOpcode() == Instruction::Store) {
+ return !StoreAddressDependOnValue(dyn_cast<StoreInst>(I), Inst);
+ } else if (I->getOpcode() == Instruction::Load) {
+ if (I->isAtomic() &&
+ LoadAddressDependOnValue(dyn_cast<LoadInst>(I), Inst)) {
+ // Normal loads are subject to be reordered by the backend, so we only
+ // rely on atomic loads.
+ return false;
+ }
+ } else if (I->getOpcode() == Instruction::Br) {
+ return !ConditionalBranchDependsOnValue(dyn_cast<BranchInst>(I), Inst);
+ }
+ if (I == FirstInst) {
+ return true;
+ }
+ }
+ return true;
+}
+
+// XXX-comment: Same as above, except that the first upcoming store/conditional
+// branch is located here first. Returns whether extra ordering constraints are
+// needed for 'Inst', i.e., whether no reachable intermediate instruction I
+// satisfies either:
+// 1. I is a store, or an atomic load, whose address depends on 'Inst'; or
+// 2. I is a conditional branch whose condition depends on 'Inst'.
+bool NeedExtraConstraints(Instruction* Inst) {
+ SmallVector<BasicBlock*, 2> ChainedBB;
+ auto* FirstInst = findFirstStoreCondBranchInst(Inst, &ChainedBB);
+ return NeedExtraConstraints(Inst, FirstInst);
+}
+
+// XXX-comment: Returns whether the code has been changed.
+bool AddFakeConditionalBranchAfterMonotonicLoads(
+ SmallSet<LoadInst*, 1>& MonotonicLoadInsts, DominatorTree* DT) {
+ bool Changed = false;
+ while (!MonotonicLoadInsts.empty()) {
+ auto* LI = *MonotonicLoadInsts.begin();
+ MonotonicLoadInsts.erase(LI);
+ SmallVector<BasicBlock*, 2> ChainedBB;
+ auto* FirstInst = findFirstStoreCondBranchInst(LI, &ChainedBB);
+
+    // First check whether a load-store ordering constraint already exists.
+ if (FirstInst != nullptr && !NeedExtraConstraints(LI, FirstInst)) {
+ continue;
+ }
+
+ // We really need to process the relaxed load now. First see if we can delay
+ // the tainting.
+ if (FirstInst) {
+ auto* FirstInstBBTerm = FirstInst->getParent()->getTerminator();
+ while (FirstInst != FirstInstBBTerm) {
+ if (!CanDelayTainting(LI, FirstInst)) {
+ break;
+ }
+ FirstInst = FirstInst->getNextNode();
+ }
+ }
+
+ StoreInst* SI = nullptr;
+ IntrinsicInst* II = nullptr;
+ if (FirstInst) {
+ SI = dyn_cast<StoreInst>(FirstInst);
+ II = dyn_cast<IntrinsicInst>(FirstInst);
+ }
+ if (FirstInst &&
+ (SI || (II && II->getIntrinsicID() == Intrinsic::aarch64_stlxr))) {
+      // For an immediately following store, taint the address of the store.
+ if (FirstInst->getParent() == LI->getParent() ||
+ DT->dominates(LI, FirstInst)) {
+ TaintRelaxedLoads(LI, FirstInst);
+ Changed = true;
+ } else {
+ auto* Inst =
+ findMostRecentDependenceUsage(LI, FirstInst, &ChainedBB, DT);
+ if (!Inst) {
+ LI->setOrdering(Acquire);
+ Changed = true;
+ } else {
+ TaintRelaxedLoads(Inst, FirstInst);
+ Changed = true;
+ }
+ }
+ } else {
+      // No upcoming store/branch at all.
+ if (!FirstInst) {
+ TaintRelaxedLoads(LI, nullptr);
+ Changed = true;
+ } else {
+        // For an immediately following branch, directly add a fake branch.
+ if (FirstInst->getParent() == LI->getParent() ||
+ DT->dominates(LI, FirstInst)) {
+ TaintRelaxedLoads(LI, FirstInst);
+ Changed = true;
+ } else {
+ auto* Inst =
+ findMostRecentDependenceUsage(LI, FirstInst, &ChainedBB, DT);
+ if (Inst) {
+ TaintRelaxedLoads(Inst, FirstInst);
+ } else {
+ LI->setOrdering(Acquire);
+ }
+ Changed = true;
+ }
+ }
+ }
+ }
+ return Changed;
+}
+
+/**** Implementations of public methods for dependence tainting ****/
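+// E.g., for the tainted-address example in getOrAddress's comment,
+// GetUntaintedAddress(%5) returns '@x', the operand of the ptrtoint that feeds
+// the OR.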
+Value* GetUntaintedAddress(Value* CurrentAddress) {
+ auto* OrAddress = getOrAddress(CurrentAddress);
+ if (OrAddress == nullptr) {
+ // Is it tainted by a select instruction?
+ auto* Inst = dyn_cast<Instruction>(CurrentAddress);
+ if (nullptr != Inst && Inst->getOpcode() == Instruction::Select) {
+ // A selection instruction.
+ if (Inst->getOperand(1) == Inst->getOperand(2)) {
+ return Inst->getOperand(1);
+ }
+ }
+
+ return CurrentAddress;
+ }
+
+ auto* CastToInt = dyn_cast<Instruction>(OrAddress->getOperand(1));
+ if (CastToInt && CastToInt->getOpcode() == Instruction::PtrToInt) {
+ return CastToInt->getOperand(0);
+ } else {
+    // This should be a PtrToInt constant expression.
+ ConstantExpr* PtrToIntExpr =
+ dyn_cast<ConstantExpr>(OrAddress->getOperand(1));
+ if (PtrToIntExpr && PtrToIntExpr->getOpcode() == Instruction::PtrToInt) {
+ return PtrToIntExpr->getOperand(0);
+ }
+ }
+
+ // Looks like it's not been dependence-tainted. Returns itself.
+ return CurrentAddress;
+}
+
+MemoryLocation GetUntaintedMemoryLocation(StoreInst* SI) {
+ AAMDNodes AATags;
+ SI->getAAMetadata(AATags);
+ const auto& DL = SI->getModule()->getDataLayout();
+ const auto* OriginalAddr = GetUntaintedAddress(SI->getPointerOperand());
+ DEBUG(if (OriginalAddr != SI->getPointerOperand()) {
+ dbgs() << "[GetUntaintedMemoryLocation]\n"
+ << "Storing address: " << *SI->getPointerOperand()
+ << "\nUntainted address: " << *OriginalAddr << "\n";
+ });
+ return MemoryLocation(OriginalAddr,
+ DL.getTypeStoreSize(SI->getValueOperand()->getType()),
+ AATags);
+}
+
+bool TaintDependenceToStore(StoreInst* SI, Value* DepVal) {
+ if (dependenceSetInclusion(SI, DepVal)) {
+ return false;
+ }
+
+ bool tainted = taintStoreAddress(SI, DepVal);
+ assert(tainted);
+ return tainted;
+}
+
+bool TaintDependenceToStoreAddress(StoreInst* SI, Value* DepVal) {
+ if (dependenceSetInclusion(SI->getPointerOperand(), DepVal)) {
+ return false;
+ }
+
+ bool tainted = taintStoreAddress(SI, DepVal);
+ assert(tainted);
+ return tainted;
+}
+
+bool CompressTaintedStore(BasicBlock* BB) {
+  // This function looks for windows of adjacent stores in 'BB' that satisfy the
+  // following condition (and then does the optimization):
+  // *Addr(d1) = v1, d1 is a condition and is the only dependence the store's
+  //               address depends on && Dep(v1) includes Dep(d1);
+  // *Addr(d2) = v2, d2 is a condition and is the only dependence the store's
+  //               address depends on && Dep(v2) includes Dep(d2) &&
+  //               Dep(d2) includes Dep(d1);
+  // ...
+  // *Addr(dN) = vN, dN is a condition and is the only dependence the store's
+  //               address depends on && Dep(dN) includes Dep(d(N-1)).
+ //
+  // As a result, Dep(dN) includes [Dep(d1) V ... V Dep(d(N-1))], so we can
+ // safely transform the above to the following. In between these stores, we
+ // can omit untainted stores to the same address 'Addr' since they internally
+ // have dependence on the previous stores on the same address.
+ // =>
+ // *Addr = v1
+ // *Addr = v2
+ // *Addr(d3) = v3
+ for (auto BI = BB->begin(), BE = BB->end(); BI != BE; BI++) {
+    // Look for the first store in such a window of adjacent stores.
+ auto* FirstSI = dyn_cast<StoreInst>(&*BI);
+ if (!FirstSI) {
+ continue;
+ }
+
+ // The first store in the window must be tainted.
+ auto* UntaintedAddress = GetUntaintedAddress(FirstSI->getPointerOperand());
+ if (UntaintedAddress == FirstSI->getPointerOperand()) {
+ continue;
+ }
+
+ // The first store's address must directly depend on and only depend on a
+ // condition.
+ auto* FirstSIDepCond = getConditionDependence(FirstSI->getPointerOperand());
+ if (nullptr == FirstSIDepCond) {
+ continue;
+ }
+
+ // Dep(first store's storing value) includes Dep(tainted dependence).
+ if (!dependenceSetInclusion(FirstSI->getValueOperand(), FirstSIDepCond)) {
+ continue;
+ }
+
+ // Look for subsequent stores to the same address that satisfy the condition
+ // of "compressing the dependence".
+ SmallVector<StoreInst*, 8> AdajacentStores;
+ AdajacentStores.push_back(FirstSI);
+ auto BII = BasicBlock::iterator(FirstSI);
+ for (BII++; BII != BE; BII++) {
+ auto* CurrSI = dyn_cast<StoreInst>(&*BII);
+ if (!CurrSI) {
+ if (BII->mayHaveSideEffects()) {
+ // Be conservative. Instructions with side effects are similar to
+ // stores.
+ break;
+ }
+ continue;
+ }
+
+ auto* OrigAddress = GetUntaintedAddress(CurrSI->getPointerOperand());
+ auto* CurrSIDepCond = getConditionDependence(CurrSI->getPointerOperand());
+ // All other stores must satisfy either:
+ // A. 'CurrSI' is an untainted store to the same address, or
+ // B. the combination of the following 5 subconditions:
+ // 1. Tainted;
+ // 2. Untainted address is the same as the group's address;
+ // 3. The address is tainted with a sole value which is a condition;
+ // 4. The storing value depends on the condition in 3.
+      //   5. The condition in 3 depends on the previous store's dependence
+ // condition.
+
+ // Condition A. Should ignore this store directly.
+ if (OrigAddress == CurrSI->getPointerOperand() &&
+ OrigAddress == UntaintedAddress) {
+ continue;
+ }
+ // Check condition B.
+ if (OrigAddress == CurrSI->getPointerOperand() ||
+ OrigAddress != UntaintedAddress || CurrSIDepCond == nullptr ||
+ !dependenceSetInclusion(CurrSI->getValueOperand(), CurrSIDepCond)) {
+ // Check condition 1, 2, 3 & 4.
+ break;
+ }
+
+ // Check condition 5.
+ StoreInst* PrevSI = AdajacentStores[AdajacentStores.size() - 1];
+ auto* PrevSIDepCond = getConditionDependence(PrevSI->getPointerOperand());
+      assert(PrevSIDepCond &&
+             "Store in the group must already depend on a condition");
+ if (!dependenceSetInclusion(CurrSIDepCond, PrevSIDepCond)) {
+ break;
+ }
+
+ AdajacentStores.push_back(CurrSI);
+ }
+
+ if (AdajacentStores.size() == 1) {
+ // The outer loop should keep looking from the next store.
+ continue;
+ }
+
+ // Now we have such a group of tainted stores to the same address.
+ DEBUG(dbgs() << "[CompressTaintedStore]\n");
+ DEBUG(dbgs() << "Original BB\n");
+ DEBUG(dbgs() << *BB << '\n');
+ auto* LastSI = AdajacentStores[AdajacentStores.size() - 1];
+ for (unsigned i = 0; i < AdajacentStores.size() - 1; ++i) {
+ auto* SI = AdajacentStores[i];
+
+ // Use the original address for stores before the last one.
+ SI->setOperand(1, UntaintedAddress);
+
+ DEBUG(dbgs() << "Store address has been reversed: " << *SI << '\n';);
+ }
+ // XXX-comment: Try to make the last store use fewer registers.
+ // If LastSI's storing value is a select based on the condition with which
+ // its address is tainted, transform the tainted address to a select
+ // instruction, as follows:
+ // r1 = Select Cond ? A : B
+ // r2 = Cond & 0
+ // r3 = Addr | r2
+ // *r3 = r1
+ // ==>
+ // r1 = Select Cond ? A : B
+ // r2 = Select Cond ? Addr : Addr
+ // *r2 = r1
+ // The idea is that both Select instructions depend on the same condition,
+ // so hopefully the backend can generate two cmov instructions for them (and
+  // this reduces the number of registers needed).
+ auto* LastSIDep = getConditionDependence(LastSI->getPointerOperand());
+ auto* LastSIValue = dyn_cast<Instruction>(LastSI->getValueOperand());
+ if (LastSIValue && LastSIValue->getOpcode() == Instruction::Select &&
+ LastSIValue->getOperand(0) == LastSIDep) {
+ // XXX-comment: Maybe it's better for us to just leave it as an and/or
+ // dependence pattern.
+ // /*
+ IRBuilder<true, NoFolder> Builder(LastSI);
+ auto* Address =
+ Builder.CreateSelect(LastSIDep, UntaintedAddress, UntaintedAddress);
+ LastSI->setOperand(1, Address);
+ DEBUG(dbgs() << "The last store becomes :" << *LastSI << "\n\n";);
+ // */
+ }
+ }
+
+ return true;
+}
+
+bool PassDependenceToStore(Value* OldAddress, StoreInst* NewStore) {
+ Value* OldDep = getDependence(OldAddress);
+ // Return false when there's no dependence to pass from the OldAddress.
+ if (!OldDep) {
+ return false;
+ }
+
+ // No need to pass the dependence to NewStore's address if it already depends
+ // on whatever 'OldAddress' depends on.
+ if (StoreAddressDependOnValue(NewStore, OldDep)) {
+ return false;
+ }
+ return taintStoreAddress(NewStore, OldAddress);
+}
+
+SmallSet<Value*, 8> FindDependence(Value* Val) {
+ SmallSet<Value*, 8> DepSet;
+ recursivelyFindDependence(&DepSet, Val, true /*Only insert leaf nodes*/);
+ return DepSet;
+}
+
+bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal) {
+ return dependenceSetInclusion(SI->getPointerOperand(), DepVal);
+}
+
+bool LoadAddressDependOnValue(LoadInst* LI, Value* DepVal) {
+ return dependenceSetInclusion(LI->getPointerOperand(), DepVal);
+}
+
+bool StoreDependOnValue(StoreInst* SI, Value* Dep) {
+ return dependenceSetInclusion(SI, Dep);
+}
+
+bool ValueDependOnValue(Value* Inst, Value* Dep) {
+ return dependenceSetInclusion(Inst, Dep);
+}
+
+// XXX-update: Checks whether the relaxed load 'LI' has subsequent instructions
+// that naturally prevent it from being reordered across later stores.
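+// E.g., a relaxed load whose result feeds the conditional branch terminating
+// its block, or whose result is the expected ("compare") operand of a later
+// cmpxchg, already has such protection and needs no extra tainting.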
+bool HasSubsequentOrderingProtection(LoadInst* LI) {
+ auto* BB = LI->getParent();
+ auto* Term = BB->getTerminator();
+ for (auto Iter = BasicBlock::iterator(LI->getNextNode()); Iter != BB->end();
+ Iter++) {
+ Instruction* I = &*Iter;
+
+ // Reaching the end of the block.
+ if (I == Term) {
+ auto* Branch = dyn_cast<BranchInst>(Term);
+ // The last instruction isn't a branch, end of analysis.
+ if (!Branch) {
+ return false;
+ }
+ if (Branch->isConditional()) {
+ if (ValueDependOnValue(Branch, LI)) {
+ // 'LI' is used in the conditional branch.
+ return true;
+ } else {
+ // Reach the end with a cond branch that doesn't use the result of
+ // 'LI'.
+ return false;
+ }
+ } else {
+        // Reach the end with an unconditional branch, keep going to the next
+ // block.
+ BB = BB->getSingleSuccessor();
+ Term = BB->getTerminator();
+ Iter = BB->begin();
+ continue;
+ }
+ }
+
+    // 'I' is a CAS whose expected (compare) value depends on 'LI'. We don't
+    // need to taint 'LI' then.
+ auto* CAS = dyn_cast<AtomicCmpXchgInst>(I);
+ if (CAS) {
+ if (ValueDependOnValue(CAS->getCompareOperand(), LI)) {
+ return true;
+ }
+ }
+
+ // fetch_* operations that have acquire-release semantics.
+ auto* RMW = dyn_cast<AtomicRMWInst>(I);
+ if (RMW) {
+ auto Order = RMW->getOrdering();
+ if (Order == AcquireRelease || Order == SequentiallyConsistent) {
+ return true;
+ }
+ }
+
+ // A load whose address depends on 'LI' prevents later stores from being
+ // reordered.
+ auto* LdInst = dyn_cast<LoadInst>(I);
+ if (LdInst) {
+ if (ValueDependOnValue(LdInst->getPointerOperand(), LI)) {
+ return true;
+ }
+ }
+
+ // Other instructions that don't affect the reordering.
+ if (!I->mayHaveSideEffects()) {
+ continue;
+ }
+
+    // A store whose address depends on 'LI' also provides protection.
+ auto* SI = dyn_cast<StoreInst>(I);
+ if (SI) {
+ if (ValueDependOnValue(SI->getPointerOperand(), LI)) {
+ return true;
+ }
+ }
+
+ // The following are store/store-like operations. They don't protect later
+ // stores from being reordered across 'LI', but the analysis can go on if
+ // they naturally can't be reordered across 'LI' themselves.
+ {
+ // Release (or stronger) store.
+ if (SI) {
+ auto Order = SI->getOrdering();
+ if (Order == Release || Order == SequentiallyConsistent) {
+ continue;
+ }
+ }
+
+ // Release (or stronger) fetch_*.
+ if (RMW) {
+ auto Order = RMW->getOrdering();
+ if (Order == Release || Order == AcquireRelease ||
+ Order == SequentiallyConsistent) {
+ continue;
+ }
+ }
+
+ // The instruction naturally depends on 'LI'.
+ if (ValueDependOnValue(I, LI)) {
+ continue;
+ }
+ }
+ // Otherwise, we need to taint 'LI'.
+ // XXX-comment: It may be a good idea that we can delay the fake conditional
+ // branch down to this instruction.
+ return false;
+ }
+
+ // Just in case, the loop should never end without reaching a return.
+ return false;
+}
+
+// XXX-update: Checks whether the tainting of instruction 'I' can be delayed
+// with respect to the relaxed load 'LI'. This usually means 'I' itself already
+// depends on 'LI', or 'I' is a store/store-like atomic operation that has
+// release semantics.
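+// E.g., the tainting can be delayed past a release (or stronger) store, past a
+// release/acq_rel/seq_cst RMW, or past any instruction that already depends on
+// 'LI', since none of these can be reordered across 'LI' anyway.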
+bool CanDelayTainting(LoadInst* LI, Instruction* I) {
+ if (I == I->getParent()->getTerminator()) {
+ return false;
+ }
+
+ if (!I->mayHaveSideEffects()) {
+ return true;
+ }
+
+ // The following are store/store-like operations. They don't protect later
+ // stores from being reordered across 'LI', but the analysis can go on if
+ // they naturally can't be reordered across 'LI' themselves.
+
+ // Release (or stronger) store.
+ auto* SI = dyn_cast<StoreInst>(I);
+ if (SI) {
+ auto Order = SI->getOrdering();
+ if (Order == Release || Order == SequentiallyConsistent) {
+ return true;
+ }
+ }
+
+ // Release (or stronger) fetch_*.
+ auto* RMW = dyn_cast<AtomicRMWInst>(I);
+ if (RMW) {
+ auto Order = RMW->getOrdering();
+ if (Order == Release || Order == AcquireRelease ||
+ Order == SequentiallyConsistent) {
+ return true;
+ }
+ }
+
+ // The instruction naturally depends on 'LI'.
+ if (ValueDependOnValue(I, LI)) {
+ return true;
+ }
+
+ // Otherwise, be conservative and say no!
+ return false;
+}
+
+} // namespace llvm
--- /dev/null
+//===- TaintRelaxedAtomicsUtils.h - Utils for Tainting Relaxed Atomics --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains utility functions for tainting relaxed atomics to enforce
+// strong ordering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TAINTRELAXEDATOMICSUTILS_H
+#define TAINTRELAXEDATOMICSUTILS_H
+
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/NoFolder.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Statepoint.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/BuildLibCalls.h"
+#include "llvm/Transforms/Utils/BypassSlowDivision.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
+
+namespace llvm {
+
+bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal);
+
+bool LoadAddressDependOnValue(LoadInst* LI, Value* DepVal);
+
+Value* GetUntaintedAddress(Value* CurrentAddress);
+
+// Helper function to create a Cast instruction.
+template <typename BuilderTy>
+Value* createCast(BuilderTy& Builder, Value* DepVal, Type* TargetIntegerType);
+
+// Given a value, if it's a tainted address, this function returns the
+// instruction that ORs the "dependence value" with the "original address".
+// Otherwise, returns nullptr. This instruction is the first OR instruction
+// where one of its operands is an AND instruction with an operand being 0.
+//
+// E.g., it returns '%4 = or i32 %3, %2' given 'CurrentAddress' is '%5'.
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+Instruction* getOrAddress(Value* CurrentAddress);
+
+// Given a value, if it's a tainted address, this function returns the
+// instruction that taints the "dependence value". Otherwise, returns nullptr.
+// This instruction is the last AND instruction where one of its operands is 0.
+// E.g., it returns '%3' given 'CurrentAddress' is '%5'.
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+Instruction* getAndDependence(Value* CurrentAddress);
+
+// Given a value, if it's a tainted address, this function returns
+// the "dependence value", which is the first operand in the AND instruction.
+// E.g., it returns '%1' given 'CurrentAddress' is '%5'.
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+Value* getDependence(Value* CurrentAddress);
+
+// Given an address that has been tainted, returns the only condition it depends
+// on, if any; otherwise, returns nullptr.
+Value* getConditionDependence(Value* Address);
+
+// Conservatively decides whether the dependence set of 'Val1' includes the
+// dependence set of 'Val2'. 'Val1ExpandLevel' and 'Val2ExpandLevel' bound how
+// deeply each value is expanded; 'Val2' is expanded to its leaf dependences
+// only.
+// If it returns true, it means the dependence set of 'Val1' includes that of
+// 'Val2'; otherwise, it only means we cannot conclusively decide it.
+bool dependenceSetInclusion(Value* Val1, Value* Val2,
+ int Val1ExpandLevel,
+ int Val2ExpandLevel);
+
+// Recursively iterates through the operands spawned from 'DepVal'. If there
+// exists a single value that 'DepVal' only depends on, we call that value the
+// root dependence of 'DepVal' and return it. Otherwise, return 'DepVal'.
+Value* getRootDependence(Value* DepVal);
+
+// This function taints the address of 'SI' with 'DepVal'. If the address of
+// 'SI' already depends on whatever 'DepVal' depends on, this function does
+// nothing and returns false. Otherwise, returns true.
+//
+// This effect forces the store and any stores that come later to depend on
+// 'DepVal'. For example, we have a condition "cond", and a store instruction
+// "s: STORE addr, val". If we want "s" (and any later store) to depend on
+// "cond", we do the following:
+// %conv = sext i1 %cond to i32
+// %addrVal = ptrtoint i32* %addr to i32
+// %andCond = and i32 %conv, 0
+// %orAddr = or i32 %andCond, %addrVal
+// %NewAddr = inttoptr i32 %orAddr to i32*
+//
+// This is a more concrete example:
+// ------
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+bool taintStoreAddress(StoreInst* SI, Value* DepVal);
+
+// Given 'LoadPart', the load-part result of an RMW operation, taints the
+// store-exclusive address 'Address' with it and returns the tainted address.
+Value* taintRMWStoreAddressWithLoadPart(IRBuilder<>& Builder, Value* Address,
+ Instruction* LoadPart);
+
+// XXX-comment: Returns true if it changes the code, and false otherwise (i.e.,
+// the branch condition already depends on 'DepVal').
+bool taintConditionalBranch(BranchInst* BI, Value* DepVal);
+
+bool ConditionalBranchDependsOnValue(BranchInst* BI, Value* DepVal);
+
+// XXX-update: For an instruction (e.g., a relaxed load) 'Inst', finds the first
+// immediately following store (or store-exclusive intrinsic) or conditional
+// branch. Returns nullptr if there is no such store/branch instruction, in
+// which case we can only enforce the load by making it 'acquire'. 'ChainedBB'
+// collects all the blocks chained together by unconditional branches from
+// Inst's block to the block with the first store/cond branch.
+template <typename Vector>
+Instruction* findFirstStoreCondBranchInst(Instruction* Inst, Vector* ChainedBB);
+
+// XXX-update: Find the next node of the last relaxed load from 'FromInst' to
+// 'ToInst'. If none, return 'ToInst'.
+Instruction* findLastLoadNext(Instruction* FromInst, Instruction* ToInst);
+
+// Inserts a fake conditional branch right after the instruction 'SplitInst',
+// and the branch condition is 'Condition'. 'SplitInst' will be placed in the
+// newly created block.
+void AddFakeConditionalBranch(Instruction* SplitInst, Value* Condition);
+
+// Taints the result of 'UsageInst' with a fake conditional branch inserted at
+// 'InsertPoint' (or merged into an already-added fake branch there). If
+// 'InsertPoint' is null, the insertion point is right after 'UsageInst'.
+void TaintRelaxedLoads(Instruction* UsageInst, Instruction* InsertPoint);
+
+// Taints 'Inst' (i.e., adds a fake conditional block that uses 'Inst') at the
+// beginning of basic block 'BB'. Since multiple predecessors may reach 'BB', we
+// first add an appropriate PHI node for 'Inst' and then taint the PHI node.
+void TaintAtBlockBeginning(Instruction* Inst, BasicBlock* BB);
+
+// XXX-comment: Finds the appropriate Value derived from an atomic load.
+// 'ChainedBB' contains all the blocks chained together with unconditional
+// branches from LI's parent BB to the block with the first store/cond branch.
+// If we don't find any, it means 'LI' is not used at all (which should not
+// happen in practice). We can simply set 'LI' to be acquire just to be safe.
+template <typename Vector>
+Instruction* findMostRecentDependenceUsage(LoadInst* LI, Instruction* LaterInst,
+ Vector* ChainedBB,
+ DominatorTree* DT);
+
+// XXX-comment: For an instruction (e.g., a load) 'Inst' and the first upcoming
+// store/conditional branch instruction 'FirstInst', returns whether extra
+// ordering constraints are needed, i.e., whether there is no intermediate
+// instruction I (up to and including 'FirstInst') that satisfies either:
+// 1. I is a store, or an atomic load, whose address depends on 'Inst'; or
+// 2. I is a conditional branch whose condition depends on 'Inst'.
+// Note that 'Inst' and 'FirstInst' can be in different basic blocks, as long as
+// Inst's basic block unconditionally jumps (possibly in steps) to FirstInst's
+// block.
+bool NeedExtraConstraints(Instruction* Inst, Instruction* FirstInst);
+
+// XXX-comment: Same as above, except that the first upcoming store/conditional
+// branch is located here first. Returns whether extra ordering constraints are
+// needed for 'Inst', i.e., whether no reachable intermediate instruction I
+// satisfies either:
+// 1. I is a store, or an atomic load, whose address depends on 'Inst'; or
+// 2. I is a conditional branch whose condition depends on 'Inst'.
+bool NeedExtraConstraints(Instruction* Inst);
+
+// XXX-comment: Returns whether the code has been changed.
+bool AddFakeConditionalBranchAfterMonotonicLoads(
+ SmallSet<LoadInst*, 1>& MonotonicLoadInsts, DominatorTree* DT);
+
+/**** Public methods for dependence tainting ****/
+Value* GetUntaintedAddress(Value* CurrentAddress);
+
+MemoryLocation GetUntaintedMemoryLocation(StoreInst* SI);
+
+bool TaintDependenceToStore(StoreInst* SI, Value* DepVal);
+
+bool TaintDependenceToStoreAddress(StoreInst* SI, Value* DepVal);
+
+bool CompressTaintedStore(BasicBlock* BB);
+
+bool PassDependenceToStore(Value* OldAddress, StoreInst* NewStore);
+
+SmallSet<Value*, 8> FindDependence(Value* Val);
+
+bool StoreDependOnValue(StoreInst* SI, Value* Dep);
+
+bool ValueDependOnValue(Value* Inst, Value* Dep);
+
+// XXX-update: Checks whether the relaxed load 'LI' has subsequent instructions
+// that naturally prevent it from being reordered across later stores.
+bool HasSubsequentOrderingProtection(LoadInst* LI);
+
+
+// XXX-update: Checks whether the tainting of instruction 'I' can be delayed
+// with respect to the relaxed load 'LI'. This usually means 'I' itself already
+// depends on 'LI', or 'I' is a store/store-like atomic operation that has
+// release semantics.
+bool CanDelayTainting(LoadInst* LI, Instruction* I);
+
+} // namespace llvm
+
+#endif