--- /dev/null
+#!/bin/bash
+
+MABAINLIB="../src"
+MABAINDIR="mabain/examples"
+
+TESTS="silo mabain"
+
+TOTAL_RUN=$1
+
+if [ -z "$1" ]; then
+ TOTAL_RUN=10
+fi
+
+function run_silo_test {
+ COUNT_ASSERT=0
+ EXE='./dbtest --verbose -t 5'
+
+ cd 'silo/out-perf.debug.check.masstree/benchmarks/'
+ for i in `seq 1 1 $TOTAL_RUN`
+ do
+ OUTPUT="$($EXE 2>&1)"
+ ASSERT="$(echo "$OUTPUT" | grep "Assert")"
+ if [ -n "$ASSERT" ] ; then
+ ((++COUNT_ASSERT))
+ fi
+ done
+
+ cd ../../..
+
+ AVG_ASSERT=$(echo "${COUNT_ASSERT} * 100 / ${TOTAL_RUN}" | bc -l | xargs printf "%.1f")
+ echo "Runs: ${TOTAL_RUN} | Assertions: ${COUNT_ASSERT} | Assertion rate: ${AVG_ASSERT}%"
+}
+
+function run_mabain_test {
+ export LD_LIBRARY_PATH="${MABAINLIB}"
+
+ COUNT_ASSERT=0
+ EXE='./mb_multi_thread_insert_test_assert'
+
+ cd ${MABAINDIR}
+ for i in `seq 1 1 $TOTAL_RUN`
+ do
+ OUTPUT="$(/usr/bin/time -f "time: %E" $EXE 2>&1)"
+ ASSERT="$(echo "$OUTPUT" | grep "Assert")"
+ if [ -n "$ASSERT" ] ; then
+ ((++COUNT_ASSERT))
+ fi
+
+ rm ./multi_test/* 2> /dev/null
+ done
+
+ cd ../..
+
+ AVG_ASSERT=$(echo "${COUNT_ASSERT} * 100 / ${TOTAL_RUN}" | bc -l | xargs printf "%.1f")
+ echo "Runs: ${TOTAL_RUN} | Assertions: ${COUNT_ASSERT} | Assertion rate: ${AVG_ASSERT}%"
+}
+
+echo "** Assertion test for some application benchmarks: ${TESTS} **"
+for t in ${TESTS}
+do
+ echo -n "${t} "
+ run_${t}_test
+done
--- /dev/null
+#!/bin/bash
+
+TOTAL_RUN=$1
+
+if [ -z "$1" ]; then
+ TOTAL_RUN=10
+fi
+
+# Clear data
+rm *.log 2> /dev/null
+
+echo "** Performance test for application benchmarks **"
+# Run in all-core configuration
+if [ ! -d "all-core" ]; then
+ mkdir all-core
+fi
+
+echo "Running each benchmark with multiple cores for ${TOTAL_RUN} times"
+#rm all-core/*.log 2> /dev/null
+./run.sh $TOTAL_RUN
+mv *.log all-core
+echo "Done"
+python calculator.py all-core
+
+# Run in single-core configuration
+if [ ! -d "single-core" ]; then
+ mkdir single-core
+fi
+
+echo "Running each benchmark with a single core for ${TOTAL_RUN} times"
+#rm single-core/*.log 2> /dev/null
+taskset -c 0 ./run.sh $TOTAL_RUN
+mv *.log single-core
+echo "Done"
+python calculator.py single-core
+
+++ /dev/null
-#!/bin/bash
-
-MABAINLIB="../src"
-MABAINDIR="mabain/examples"
-
-TESTS="silo"
-
-TOTAL_RUN=$1
-
-if [ -z "$1" ]; then
- TOTAL_RUN=10
-fi
-
-function run_silo_test {
- echo "Silo assertion test"
- COUNT_ASSERT=0
- EXE='./dbtest --verbose -t 5'
-
- cd 'silo/out-perf.debug.check.masstree/benchmarks/'
- for i in `seq 1 1 $TOTAL_RUN`
- do
- OUTPUT="$($EXE 2>&1)"
- ASSERT="$(echo "$OUTPUT" | grep "Assert")"
- if [ -n "$ASSERT" ] ; then
- ((++COUNT_ASSERT))
- fi
- done
-
- cd ../../..
-
- AVG_ASSERT=$(echo "${COUNT_ASSERT} * 100 / ${TOTAL_RUN}" | bc -l | xargs printf "%.1f")
- echo "Runs: ${TOTAL_RUN} | Assertion rate: ${AVG_ASSERT}%"
-}
-
-function run_mabain_test {
- export LD_LIBRARY_PATH="$${MABAINLIB}"
-
- echo "Mabain assertion test"
- COUNT_ASSERT=0
- EXE='./mb_multi_thread_insert_test_assert'
-
- cd ${MABAINDIR}
- for i in `seq 1 1 $TOTAL_RUN`
- do
- OUTPUT="$(/usr/bin/time -f "time: %E" $EXE 2>&1)"
- ASSERT="$(echo "$OUTPUT" | grep "Assert")"
- if [ -n "$ASSERT" ] ; then
- ((++COUNT_ASSERT))
- fi
-
- rm ./multi_test/* 2> /dev/null
- done
-
- cd ../..
-
- AVG_ASSERT=$(echo "${COUNT_ASSERT} * 100 / ${TOTAL_RUN}" | bc -l | xargs printf "%.1f")
- echo "Runs: ${TOTAL_RUN} | Assertion rate: ${AVG_ASSERT}%"
-}
-
-#function run_all_tests {
-# for t in ${TESTS}
-# do
-# echo "running ${t}"
-# (run_${t}_test 2>&1) > "${t}.log"
-# run_${t}_test &> "${t}.log"
-# done
-#}
-
-run_silo_test
-run_mabain_test
import statistics
import sys
+def checkRace(content):
+    has_race = re.search(r'race', content)
+    if has_race:
+        print('data race detected')
+    else:
+        print('data race not detected')
+
def GdaxStatistics(filename):
gdax_data = []
with open(filename, 'r') as f:
content = f.read()
+ checkRace(content)
+
allruns = re.findall(r'(0-0\.49.*?19.99 s: \d+)', content, flags=re.DOTALL)
for run in allruns:
iterations = 0
data = []
with open(filename, 'r') as f:
content = f.read()
+ checkRace(content)
allruns = re.findall(r'agg_throughput: (\d+\.?\d*) ops', content)
data = [float(x) for x in allruns]
data = []
with open(filename, 'r') as f:
content = f.read()
+ checkRace(content)
allruns = re.findall(r'real.*?(\d+)m(\d+\.\d+)s', content)
for run in allruns:
(minute,second) = run
def JsbenchStatistics(filename):
with open(filename, 'r') as f:
content = f.read()
+ checkRace(content)
result = re.search(r'(Final results.*?runs)', content, flags=re.DOTALL)
print(result.group(0))
--- /dev/null
+#!/bin/sh
+
+# Test application benchmarks
+./app_test_all.sh
+echo ""
+
+# Test CDSChecker data structure benchmarks
+cd cdschecker_modified_benchmarks
+./test_all.sh
+cd ..
+echo ""
+
+# Test data structures with bugs that tsan11/tsan11rec cannot detect
+cd tsan11-missingbug
+./test_all.sh
+cd ..
+echo ""
+
+# Test assertion failures in Silo and Mabain
+./app_assertion_test.sh
cd ..
}
-function run_all_tests {
- for t in ${TESTS}
- do
- echo "running ${t}"
- (run_${t}_test 2>&1) > "${t}.log"
-# run_${t}_test &> "${t}.log"
- done
-}
-
-# Remove previous output files
-rm *.log 2> /dev/null
rm $REDUNDANT 2> /dev/null
-run_all_tests
+echo "Benchmarks: ${TESTS}"
+for t in ${TESTS}
+do
+ rm "${t}.log" 2> /dev/null
+ echo "Running ${t}"
+ (run_${t}_test 2>&1) > "${t}.log"
+done
cd tsan11-benchmarks
git checkout tsan11-docker
cp /data/scripts/build.sh .
+cp /data/scripts/do_test_all.sh .
+cp /data/scripts/app_assertion_test.sh .
+cp /data/scripts/app_test_all.sh .
cp /data/scripts/run.sh .
-cp /data/scripts/test_all.sh .
cp /data/scripts/calculator.py .
./build.sh
cd ..
cd tsan11rec-benchmarks
git checkout tsan11-docker
cp /data/scripts/build.sh .
+cp /data/scripts/do_test_all.sh .
+cp /data/scripts/app_assertion_test.sh .
+cp /data/scripts/app_test_all.sh .
cp /data/scripts/run.sh .
-cp /data/scripts/test_all.sh .
cp /data/scripts/calculator.py .
sed -i "s/tsan11/tsan11rec/g" clang clang++ gcc g++ build.sh run.sh
./build.sh
+++ /dev/null
-#!/bin/bash
-
-TOTAL_RUN=$1
-
-if [ -z "$1" ]; then
- TOTAL_RUN=10
-fi
-
-# Clear data
-rm *.log 2> /dev/null
-
-echo "test application benchmarks"
-# Run in all-core configuration
-if [ ! -d "all-core" ]; then
- mkdir all-core
-fi
-
-echo "running each benchmark with multiple cores for ${TOTAL_RUN} times"
-rm all-core/*.log 2> /dev/null
-./run.sh $TOTAL_RUN
-mv *.log all-core
-echo "done"
-python calculator.py all-core
-
-# Run in single-core configuration
-if [ ! -d "single-core" ]; then
- mkdir single-core
-fi
-
-echo "running each benchmark with a single core for ${TOTAL_RUN} times"
-rm single-core/*.log 2> /dev/null
-taskset -c 0 ./run.sh $TOTAL_RUN
-mv *.log single-core
-echo "done"
-python calculator.py single-core
-