#pragma once
+#include <assert.h>
+#include <boost/noncopyable.hpp>
+#include <errno.h>
+#include <glog/logging.h>
+#include <semaphore.h>
#include <atomic>
#include <functional>
+#include <mutex>
#include <thread>
#include <unordered_set>
#include <vector>
-#include <boost/noncopyable.hpp>
-#include <semaphore.h>
-#include <errno.h>
-#include <assert.h>
-#include <glog/logging.h>
#include <folly/ScopeGuard.h>
#include <folly/detail/CacheLocality.h>
return rv;
}
};
+
+/**
+ * DeterministicMutex is a drop-in replacement for std::mutex that
+ * cooperates with DeterministicSchedule.
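+ *
+ * A minimal usage sketch (illustrative only; the calls shown are the
+ * DeterministicSchedule API that the tests below also use):
+ *
+ *   DeterministicSchedule sched(DeterministicSchedule::uniform(0));
+ *   DeterministicMutex mu;
+ *   auto t = DeterministicSchedule::thread([&] {
+ *     mu.lock();
+ *     // ... critical section, serialized by the scheduler ...
+ *     mu.unlock();
+ *   });
+ *   DeterministicSchedule::join(t);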
+ */
+struct DeterministicMutex {
+ std::mutex m;
+
+ DeterministicMutex() = default;
+ ~DeterministicMutex() = default;
+ DeterministicMutex(DeterministicMutex const&) = delete;
+ DeterministicMutex& operator=(DeterministicMutex const&) = delete;
+
+ void lock() {
+ FOLLY_TEST_DSCHED_VLOG(this << ".lock()");
+ while (!try_lock()) {
+ // Not calling m.lock() in order to avoid deadlock when the
+ // mutex m is held by another thread. The deadlock would be
+ // between the call to m.lock() and the lock holder's wait on
+ // its own tls_sem scheduling semaphore.
+ }
+ }
+
+ bool try_lock() {
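+    // beforeSharedAccess()/afterSharedAccess() bracket the underlying
+    // try_lock so the scheduler serializes this shared access. Unlike
+    // m.lock(), try_lock never blocks, so it cannot deadlock with the
+    // lock holder's wait on its own scheduling semaphore.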
+ DeterministicSchedule::beforeSharedAccess();
+ bool rv = m.try_lock();
+ FOLLY_TEST_DSCHED_VLOG(this << ".try_lock() -> " << rv);
+ DeterministicSchedule::afterSharedAccess();
+ return rv;
+ }
+
+ void unlock() {
+ FOLLY_TEST_DSCHED_VLOG(this << ".unlock()");
+ m.unlock();
+ }
+};
} // namespace test
} // namespace folly
}
}
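+
+// Exercises DeterministicMutex: ten threads increment a shared
+// counter, half via lock() and half via try_lock(). When bug is
+// true, the atomicity of the increment is deliberately broken so
+// that the deterministic schedule can expose the race.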
+TEST(DeterministicSchedule, buggyAdd) {
+ for (bool bug : {false, true}) {
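+    // uniform(0) yields a pseudo-random but repeatable schedule
+    // seeded with 0, so any interleaving that fails here fails the
+    // same way on every run.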
+ DeterministicSchedule sched(DeterministicSchedule::uniform(0));
+ if (bug) {
+ FOLLY_TEST_DSCHED_VLOG("Test with race condition");
+ } else {
+ FOLLY_TEST_DSCHED_VLOG("Test without race condition");
+ }
+ DeterministicMutex m;
+    // The use of DeterministicAtomic is not needed here, but it makes
+ // it easier to understand the sequence of events in logs.
+ DeterministicAtomic<int> test{0};
+ DeterministicAtomic<int> baseline{0};
+ int numThreads = 10;
+ std::vector<std::thread> threads(numThreads);
+ for (int t = 0; t < numThreads; ++t) {
+ threads[t] = DeterministicSchedule::thread([&, t] {
+ baseline.fetch_add(1);
+ // Atomic increment of test protected by mutex m
+ do {
+ // Some threads use lock() others use try_lock()
+ if ((t & 1) == 0) {
+ m.lock();
+ } else {
+ if (!m.try_lock()) {
+ continue;
+ }
+ }
+ int newval = test.load() + 1;
+ if (bug) {
+ // Break the atomicity of the increment operation
+ m.unlock();
+ m.lock();
+ }
+ test.store(newval);
+ m.unlock();
+ break;
+ } while (true);
+ }); // thread lambda
+ } // for t
+ for (auto& t : threads) {
+ DeterministicSchedule::join(t);
+ }
+ if (!bug) {
+ EXPECT_EQ(test.load(), baseline.load());
+ } else {
+ if (test.load() == baseline.load()) {
+ FOLLY_TEST_DSCHED_VLOG("Didn't catch the bug");
+ } else {
+ FOLLY_TEST_DSCHED_VLOG("Caught the bug");
+ }
+ }
+ } // for bug
+} // TEST
+
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
gflags::ParseCommandLineFlags(&argc, &argv, true);