#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include "llvm/Transforms/Utils/EscapeEnumerator.h"
#include <vector>
using namespace llvm;
}
if (print) {
- errs() << position_string;
+ errs() << position_string << "\n";
}
return IRB.CreateGlobalStringPtr (position_string);
static const size_t kNumberOfAccessSizes = 4;
-int getAtomicOrderIndex(AtomicOrdering order){
+int getAtomicOrderIndex(AtomicOrdering order) {
switch (order) {
case AtomicOrdering::Monotonic:
return (int)AtomicOrderingCABI::relaxed;
}
}
+/* Map the integer memory-order index used by the CDS runtime ABI back to
+ * the corresponding C-ABI atomic ordering (0 = relaxed ... 5 = seq_cst).
+ * Inverse of getAtomicOrderIndex. An out-of-range index is reported and
+ * conservatively treated as seq_cst (the strongest ordering). */
+AtomicOrderingCABI indexToAtomicOrder(int index) {
+	switch (index) {
+		case 0:
+			return AtomicOrderingCABI::relaxed;
+		case 1:
+			return AtomicOrderingCABI::consume;
+		case 2:
+			return AtomicOrderingCABI::acquire;
+		case 3:
+			return AtomicOrderingCABI::release;
+		case 4:
+			return AtomicOrderingCABI::acq_rel;
+		case 5:
+			return AtomicOrderingCABI::seq_cst;
+		default:
+			errs() << "Bad Atomic index\n";
+			/* Fall back to the strongest ordering rather than crash. */
+			return AtomicOrderingCABI::seq_cst;
+	}
+}
+
+/* Derive the failure memory order for a compare-exchange from its success
+ * order, following libstdc++ atomic_base.h: __cmpexch_failure_order.
+ * The failure order may not contain a release component, so:
+ *   acq_rel -> acquire, release -> relaxed, anything else is unchanged.
+ * Takes and returns the runtime's integer order index. */
+int AtomicCasFailureOrderIndex(int index) {
+	AtomicOrderingCABI succ_order = indexToAtomicOrder(index);
+	AtomicOrderingCABI fail_order;
+	if (succ_order == AtomicOrderingCABI::acq_rel)
+		fail_order = AtomicOrderingCABI::acquire;
+	else if (succ_order == AtomicOrderingCABI::release)
+		fail_order = AtomicOrderingCABI::relaxed;
+	else
+		fail_order = succ_order;
+
+	return (int) fail_order;
+}
+
namespace {
struct CDSPass : public FunctionPass {
static char ID;
CDSPass() : FunctionPass(ID) {}
bool runOnFunction(Function &F) override;
+ StringRef getPassName() const override;
private:
void initializeCallbacks(Module &M);
bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
+ bool instrumentVolatile(Instruction *I, const DataLayout &DL);
bool isAtomicCall(Instruction *I);
bool instrumentAtomic(Instruction *I, const DataLayout &DL);
bool instrumentAtomicCall(CallInst *CI, const DataLayout &DL);
Constant * CDSLoad[kNumberOfAccessSizes];
Constant * CDSStore[kNumberOfAccessSizes];
+ Constant * CDSVolatileLoad[kNumberOfAccessSizes];
+ Constant * CDSVolatileStore[kNumberOfAccessSizes];
Constant * CDSAtomicInit[kNumberOfAccessSizes];
Constant * CDSAtomicLoad[kNumberOfAccessSizes];
Constant * CDSAtomicStore[kNumberOfAccessSizes];
};
}
+/* Human-readable pass name shown by e.g. `opt -debug-pass` and -time-passes. */
+StringRef CDSPass::getPassName() const {
+	return "CDSPass";
+}
+
static bool isVtableAccess(Instruction *I) {
if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
return Tag->isTBAAVtableAccess();
CDSFuncEntry = M.getOrInsertFunction("cds_func_entry",
VoidTy, Int8PtrTy);
+ CDSFuncExit = M.getOrInsertFunction("cds_func_exit",
+ VoidTy, Int8PtrTy);
// Get the function to call from our untime library.
for (unsigned i = 0; i < kNumberOfAccessSizes; i++) {
// void cds_atomic_store8 (void * obj, int atomic_index, uint8_t val)
SmallString<32> LoadName("cds_load" + BitSizeStr);
SmallString<32> StoreName("cds_store" + BitSizeStr);
+ SmallString<32> VolatileLoadName("cds_volatile_load" + BitSizeStr);
+ SmallString<32> VolatileStoreName("cds_volatile_store" + BitSizeStr);
SmallString<32> AtomicInitName("cds_atomic_init" + BitSizeStr);
SmallString<32> AtomicLoadName("cds_atomic_load" + BitSizeStr);
SmallString<32> AtomicStoreName("cds_atomic_store" + BitSizeStr);
CDSLoad[i] = M.getOrInsertFunction(LoadName, VoidTy, PtrTy);
CDSStore[i] = M.getOrInsertFunction(StoreName, VoidTy, PtrTy);
+ CDSVolatileLoad[i] = M.getOrInsertFunction(VolatileLoadName,
+ Ty, PtrTy, Int8PtrTy);
+ CDSVolatileStore[i] = M.getOrInsertFunction(VolatileStoreName,
+ VoidTy, PtrTy, Ty, Int8PtrTy);
CDSAtomicInit[i] = M.getOrInsertFunction(AtomicInitName,
VoidTy, PtrTy, Ty, Int8PtrTy);
CDSAtomicLoad[i] = M.getOrInsertFunction(AtomicLoadName,
SmallVector<Instruction*, 8> AllLoadsAndStores;
SmallVector<Instruction*, 8> LocalLoadsAndStores;
+ SmallVector<Instruction*, 8> VolatileLoadsAndStores;
SmallVector<Instruction*, 8> AtomicAccesses;
std::vector<Instruction *> worklist;
bool Res = false;
bool HasAtomic = false;
+ bool HasVolatile = false;
const DataLayout &DL = F.getParent()->getDataLayout();
// errs() << "--- " << F.getName() << "---\n";
AtomicAccesses.push_back(&I);
HasAtomic = true;
} else if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
- LocalLoadsAndStores.push_back(&I);
+ LoadInst *LI = dyn_cast<LoadInst>(&I);
+ StoreInst *SI = dyn_cast<StoreInst>(&I);
+ bool isVolatile = ( LI ? LI->isVolatile() : SI->isVolatile() );
+
+ if (isVolatile) {
+ VolatileLoadsAndStores.push_back(&I);
+ HasVolatile = true;
+ } else
+ LocalLoadsAndStores.push_back(&I);
} else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
// not implemented yet
}
}
for (auto Inst : AllLoadsAndStores) {
- // Res |= instrumentLoadOrStore(Inst, DL);
- // errs() << "load and store are replaced\n";
+ Res |= instrumentLoadOrStore(Inst, DL);
+ }
+
+ for (auto Inst : VolatileLoadsAndStores) {
+ Res |= instrumentVolatile(Inst, DL);
}
for (auto Inst : AtomicAccesses) {
}
// only instrument functions that contain atomics
- if (Res && HasAtomic) {
- /*
+ if (Res && ( HasAtomic || HasVolatile) ) {
IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
+ /* Unused for now
Value *ReturnAddress = IRB.CreateCall(
Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
IRB.getInt32(0));
+ */
Value * FuncName = IRB.CreateGlobalStringPtr(F.getName());
- */
- //errs() << "function name: " << F.getName() << "\n";
- //IRB.CreateCall(CDSFuncEntry, FuncName);
+ IRB.CreateCall(CDSFuncEntry, FuncName);
-/*
- EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
+ EscapeEnumerator EE(F, "cds_cleanup", true);
while (IRBuilder<> *AtExit = EE.Next()) {
- AtExit->CreateCall(TsanFuncExit, {});
+ AtExit->CreateCall(CDSFuncExit, FuncName);
}
-*/
+
Res = true;
}
}
return false;
int Idx = getMemoryAccessFuncIndex(Addr, DL);
+ if (Idx < 0)
+ return false;
// not supported by CDS yet
/* if (IsWrite && isVtableAccess(I)) {
if ( ArgType != Int8PtrTy && ArgType != Int16PtrTy &&
ArgType != Int32PtrTy && ArgType != Int64PtrTy ) {
- //errs() << "A load or store of type ";
- //errs() << *ArgType;
- //errs() << " is passed in\n";
- return false; // if other types of load or stores are passed in
+ // if other types of load or stores are passed in
+ return false;
}
IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, Addr->getType()));
if (IsWrite) NumInstrumentedWrites++;
return true;
}
+/* Replace a volatile load/store with a call into the CDS runtime
+ * (cds_volatile_load{8,16,32,64} / cds_volatile_store{8,16,32,64}),
+ * passing the source position string for diagnostics.
+ * Returns true if the instruction was replaced; false when the access
+ * size is unsupported (getMemoryAccessFuncIndex < 0) or the instruction
+ * is neither a load nor a store. The original instruction is destroyed
+ * by ReplaceInstWithInst on success. */
+bool CDSPass::instrumentVolatile(Instruction * I, const DataLayout &DL) {
+	IRBuilder<> IRB(I);
+	Value *position = getPosition(I, IRB);
+
+	if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+		assert( LI->isVolatile() );
+		Value *Addr = LI->getPointerOperand();
+		int Idx=getMemoryAccessFuncIndex(Addr, DL);
+		if (Idx < 0)
+			return false;
+
+		Value *args[] = {Addr, position};
+		Instruction* funcInst=CallInst::Create(CDSVolatileLoad[Idx], args);
+		ReplaceInstWithInst(LI, funcInst);
+	} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+		assert( SI->isVolatile() );
+		Value *Addr = SI->getPointerOperand();
+		int Idx=getMemoryAccessFuncIndex(Addr, DL);
+		if (Idx < 0)
+			return false;
+
+		Value *val = SI->getValueOperand();
+		Value *args[] = {Addr, val, position};
+		Instruction* funcInst=CallInst::Create(CDSVolatileStore[Idx], args);
+		ReplaceInstWithInst(SI, funcInst);
+	} else {
+		return false;
+	}
+
+	return true;
+}
+
bool CDSPass::instrumentAtomic(Instruction * I, const DataLayout &DL) {
IRBuilder<> IRB(I);
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
Value *Addr = LI->getPointerOperand();
int Idx=getMemoryAccessFuncIndex(Addr, DL);
+ if (Idx < 0)
+ return false;
+
int atomic_order_index = getAtomicOrderIndex(LI->getOrdering());
Value *order = ConstantInt::get(OrdTy, atomic_order_index);
Value *args[] = {Addr, order, position};
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
Value *Addr = SI->getPointerOperand();
int Idx=getMemoryAccessFuncIndex(Addr, DL);
+ if (Idx < 0)
+ return false;
+
int atomic_order_index = getAtomicOrderIndex(SI->getOrdering());
Value *val = SI->getValueOperand();
Value *order = ConstantInt::get(OrdTy, atomic_order_index);
} else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
Value *Addr = RMWI->getPointerOperand();
int Idx=getMemoryAccessFuncIndex(Addr, DL);
+ if (Idx < 0)
+ return false;
+
int atomic_order_index = getAtomicOrderIndex(RMWI->getOrdering());
Value *val = RMWI->getValOperand();
Value *order = ConstantInt::get(OrdTy, atomic_order_index);
Value *Addr = CASI->getPointerOperand();
int Idx=getMemoryAccessFuncIndex(Addr, DL);
+ if (Idx < 0)
+ return false;
const unsigned ByteSize = 1U << Idx;
const unsigned BitSize = ByteSize * 8;
// atomic_init; args = {obj, order}
if (funName.contains("atomic_init")) {
+ Value *OrigVal = parameters[1];
+
Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
- Value *val = IRB.CreateBitOrPointerCast(parameters[1], Ty);
+ Value *val;
+ if (OrigVal->getType()->isPtrOrPtrVectorTy())
+ val = IRB.CreatePointerCast(OrigVal, Ty);
+ else
+ val = IRB.CreateIntCast(OrigVal, Ty, true);
+
Value *args[] = {ptr, val, position};
Instruction* funcInst = CallInst::Create(CDSAtomicInit[Idx], args);
return true;
} else if (funName.contains("atomic") &&
- funName.contains("EEEE5store") ) {
+ funName.contains("store") ) {
// does this version of call always have an atomic order as an argument?
Value *OrigVal = parameters[1];
Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
- Value *val = IRB.CreatePointerCast(OrigVal, Ty);
- Value *order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
+ Value *val;
+ if (OrigVal->getType()->isPtrOrPtrVectorTy())
+ val = IRB.CreatePointerCast(OrigVal, Ty);
+ else
+ val = IRB.CreateIntCast(OrigVal, Ty, true);
+
+ Value *order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
Value *args[] = {ptr, val, order, position};
Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], args);
// atomic_fetch_*; args = {obj, val, order}
if (funName.contains("atomic_fetch_") ||
- funName.contains("atomic_exchange") ) {
+ funName.contains("atomic_exchange")) {
+
+ /* TODO: implement stricter function name checking */
+ if (funName.contains("non"))
+ return false;
+
bool isExplicit = funName.contains("_explicit");
Value *OrigVal = parameters[1];
}
Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
- Value *val = IRB.CreatePointerCast(OrigVal, Ty);
+ Value *val;
+ if (OrigVal->getType()->isPtrOrPtrVectorTy())
+ val = IRB.CreatePointerCast(OrigVal, Ty);
+ else
+ val = IRB.CreateIntCast(OrigVal, Ty, true);
+
Value *order;
if (isExplicit)
order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
return true;
} else if (funName.contains("fetch")) {
- errs() << "atomic exchange captured. Not implemented yet. ";
+ errs() << "atomic fetch captured. Not implemented yet. ";
errs() << "See source file :";
getPosition(CI, IRB, true);
+ return false;
} else if (funName.contains("exchange") &&
!funName.contains("compare_exchange") ) {
- errs() << "atomic exchange captured. Not implemented yet. ";
- errs() << "See source file :";
- getPosition(CI, IRB, true);
+ if (CI->getType()->isPointerTy()) {
+ // Can not deal with this now
+ errs() << "atomic exchange captured. Not implemented yet. ";
+ errs() << "See source file :";
+ getPosition(CI, IRB, true);
+
+ return false;
+ }
+
+ Value *OrigVal = parameters[1];
+
+ Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
+ Value *val;
+ if (OrigVal->getType()->isPtrOrPtrVectorTy())
+ val = IRB.CreatePointerCast(OrigVal, Ty);
+ else
+ val = IRB.CreateIntCast(OrigVal, Ty, true);
+
+ Value *order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
+ Value *args[] = {ptr, val, order, position};
+ int op = AtomicRMWInst::Xchg;
+
+ Instruction* funcInst = CallInst::Create(CDSAtomicRMW[op][Idx], args);
+ ReplaceInstWithInst(CI, funcInst);
}
/* atomic_compare_exchange_*;
Value *order_succ, *order_fail;
if (isExplicit) {
order_succ = IRB.CreateBitOrPointerCast(parameters[3], OrdTy);
- order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);
+
+ if (parameters.size() > 4) {
+ order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);
+ } else {
+ /* The failure order is not provided */
+ order_fail = order_succ;
+ ConstantInt * order_succ_cast = dyn_cast<ConstantInt>(order_succ);
+ int index = order_succ_cast->getSExtValue();
+
+ order_fail = ConstantInt::get(OrdTy,
+ AtomicCasFailureOrderIndex(index));
+ }
} else {
order_succ = ConstantInt::get(OrdTy,
(int) AtomicOrderingCABI::seq_cst);
Value *order_succ, *order_fail;
order_succ = IRB.CreateBitOrPointerCast(parameters[3], OrdTy);
- order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);
+
+ if (parameters.size() > 4) {
+ order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);
+ } else {
+ /* The failure order is not provided */
+ order_fail = order_succ;
+ ConstantInt * order_succ_cast = dyn_cast<ConstantInt>(order_succ);
+ int index = order_succ_cast->getSExtValue();
+
+ order_fail = ConstantInt::get(OrdTy,
+ AtomicCasFailureOrderIndex(index));
+ }
Value *args[] = {Addr, CmpOperand, NewOperand,
order_succ, order_fail, position};
return -1;
}
size_t Idx = countTrailingZeros(TypeSize / 8);
- assert(Idx < kNumberOfAccessSizes);
+ //assert(Idx < kNumberOfAccessSizes);
+ if (Idx >= kNumberOfAccessSizes) {
+ return -1;
+ }
return Idx;
}
legacy::PassManagerBase &PM) {
PM.add(new CDSPass());
}
+
+/* Run the pass at EP_OptimizerLast so it sees IR after all other
+ * optimizations (opt level > 0). */
+static RegisterStandardPasses
+	RegisterMyPass1(PassManagerBuilder::EP_OptimizerLast,
+registerCDSPass);
+
+/* Also register at EP_EnabledOnOptLevel0 so -O0 builds are instrumented;
+ * EP_OptimizerLast extension points do not fire at -O0. */
 static RegisterStandardPasses
-	RegisterMyPass(PassManagerBuilder::EP_OptimizerLast,
+	RegisterMyPass2(PassManagerBuilder::EP_EnabledOnOptLevel0,
 registerCDSPass);