// Simulated data protected by the lock.
static size_t locked_data;
+ // Pointer to the current RCU-protected value; writers publish a new copy
+ // here and readers load it inside an rcu_reader critical section.
static std::atomic<RcuData*> rcu_data;
- // For RCU, we mostly want to benchmark the readers (cause it's designed for
- // very fast readers and occasional writers). We have a writer thread that
- // runs nonstop until all other reader threads are done.
+ // For RCU, we want to benchmark two things:
+ // (1) Readers --- we have a writer thread that runs nonstop until all other
+ // reader threads are done with a certain number of reads.
+ // (2) Writers --- we have several reader threads that run nonstop until a
+ // writer thread finishes a certain number of writes.
+ // Number of reader threads still running; nonstop writers poll this.
static std::atomic_uint rcu_readers_num;
+ // Number of writer threads still running; nonstop readers poll this.
+ static std::atomic_uint rcu_writers_num;
// MicroLock
static size_t s_nMicroLockPassCount;
// MicroSpinLock
GetConfigNonZeroExpected(RWSpinLockPassCount, 5000000);
GetConfigNonZeroExpected(RWTicketSpinLockPassCount, 5000000);
GetConfigNonZeroExpected(RcuReaderPassCount, 10000);
- GetConfigNonZeroExpected(RcuWriterPassCount, 500);
// By default the writer performs one write every 100 ms.
GetConfigNonZeroExpected(RcuWriterFrequency, 100);
+ GetConfigNonZeroExpected(RcuWriterPassCount, 500);
GetConfigNonZeroExpected(SharedMutexWritePercentage, 5);
GetConfigNonZeroExpected(RWSpinLockWritePercentage, 5);
rcu_data.store(new RcuData(), std::memory_order_relaxed);
}
- static void run_rcu_writer_sync() {
+ // One RCU update with synchronous reclamation: copy the current data,
+ // increment both fields, publish the copy, then block in
+ // folly::synchronize_rcu() until all pre-existing readers are done
+ // before deleting the old copy.
+ static void rcu_write_sync() {
+   auto *old_data = rcu_data.load(std::memory_order_consume);
+   auto *new_data = new RcuData(*old_data);
+   new_data->d1++;
+   new_data->d2++;
+   // Release store pairs with the readers' consume loads of rcu_data.
+   rcu_data.store(new_data, std::memory_order_release);
+   folly::synchronize_rcu();
+   delete old_data;
+ }
+
+ // One RCU update with deferred reclamation: same copy-and-publish as
+ // rcu_write_sync(), but the old copy is handed to folly::rcu_retire(),
+ // which frees it asynchronously once all pre-existing readers have left
+ // their critical sections.
+ static void rcu_write_retire() {
+   auto *old_data = rcu_data.load(std::memory_order_consume);
+   auto *new_data = new RcuData(*old_data);
+   new_data->d1++;
+   new_data->d2++;
+   rcu_data.store(new_data, std::memory_order_release);
+   folly::rcu_retire(old_data);
+ }
+
+ // Writer for the read-side benchmark: keeps issuing synchronized writes,
+ // sleeping s_nRcuWriterFrequency ms between them, until all reader
+ // threads have finished. The pass-count parameter exists only so every
+ // writer function shares the signature FollyRcuThreading() expects; it is
+ // unused here, so it is left unnamed to avoid an unused-parameter warning.
+ static void run_rcu_writer_sync_nonstop(size_t /* pass_count */) {
    while (rcu_readers_num.load(std::memory_order_acquire) > 0) {
-     auto *old_data = rcu_data.load(std::memory_order_consume);
-     auto *new_data = new RcuData(*old_data);
-     new_data->d1++;
-     new_data->d2++;
-     rcu_data.store(new_data, std::memory_order_release);
-     folly::synchronize_rcu();
-     delete old_data;
+     rcu_write_sync();
      std::this_thread::sleep_for(
          std::chrono::milliseconds(s_nRcuWriterFrequency));
    }
  }
- static void run_rcu_writer_no_sync() {
+ // Writer for the read-side benchmark, rcu_retire() variant: keeps issuing
+ // deferred-reclamation writes, sleeping s_nRcuWriterFrequency ms between
+ // them, until all reader threads have finished. The pass-count parameter
+ // exists only so every writer function shares the signature
+ // FollyRcuThreading() expects; it is unused here, so it is left unnamed
+ // to avoid an unused-parameter warning.
+ static void run_rcu_writer_retire_nonstop(size_t /* pass_count */) {
    while (rcu_readers_num.load(std::memory_order_acquire) > 0) {
-     auto *old_data = rcu_data.load(std::memory_order_consume);
-     auto *new_data = new RcuData(*old_data);
-     new_data->d1++;
-     new_data->d2++;
-     rcu_data.store(new_data, std::memory_order_release);
-     folly::rcu_retire(old_data);
+     rcu_write_retire();
      std::this_thread::sleep_for(
          std::chrono::milliseconds(s_nRcuWriterFrequency));
    }
  }
- static void run_rcu_reader(size_t pass_count) {
+ // Reader for the read-side benchmark: performs exactly pass_count
+ // read-side critical sections. (Body partially elided in this hunk;
+ // presumably it sums d1 + d2 and decrements rcu_readers_num when done --
+ // confirm against the full file.)
+ static void run_rcu_reader_pass_count(size_t pass_count) {
    size_t sum = 0;
    for (size_t count = 0; count < pass_count; count++) {
      folly::rcu_reader g;
    EXPECT_GT(sum, 0);
  }
+ // Writer for the write-side benchmark: performs exactly pass_count writes
+ // with synchronous reclamation, then decrements rcu_writers_num so the
+ // nonstop readers know to stop.
+ static void run_rcu_writer_sync_pass_count(size_t pass_count) {
+   for (size_t count = 0; count < pass_count; count++) {
+     rcu_write_sync();
+   }
+   // Release pairs with the readers' acquire load of rcu_writers_num.
+   rcu_writers_num.fetch_sub(1, std::memory_order_release);
+ }
+
+ // Writer for the write-side benchmark: performs exactly pass_count writes
+ // with deferred reclamation (rcu_retire), then decrements rcu_writers_num
+ // so the nonstop readers know to stop.
+ static void run_rcu_writer_retire_pass_count(size_t pass_count) {
+   for (size_t count = 0; count < pass_count; count++) {
+     rcu_write_retire();
+   }
+   // Release pairs with the readers' acquire load of rcu_writers_num.
+   rcu_writers_num.fetch_sub(1, std::memory_order_release);
+ }
+
+ // Reader for the write-side benchmark: spins in read-side critical
+ // sections until every writer thread has signalled completion via
+ // rcu_writers_num. The pass-count parameter exists only so every reader
+ // function shares the signature FollyRcuThreading() expects; it is unused
+ // here, so it is left unnamed to avoid an unused-parameter warning.
+ static void run_rcu_reader_nonstop(size_t /* pass_count */) {
+   size_t sum = 0;
+   while (rcu_writers_num.load(std::memory_order_acquire) > 0) {
+     folly::rcu_reader g;
+     auto *data = rcu_data.load(std::memory_order_consume);
+     sum += (data->d1 + data->d2);
+   }
+   // NOTE(review): if the writer finishes before this thread's first loop
+   // iteration, sum stays 0 and this expectation fails -- consider a
+   // do/while if that race is ever observed in practice.
+   EXPECT_GT(sum, 0);
+ }
+
+ // NOTE(review): body elided in this hunk; appears to run pass_count lock
+ // operations with the given write percentage -- confirm against the full
+ // file before relying on this summary.
template <typename Lock>
static void run_rw_lock(Lock *l, size_t pass_count,
                        unsigned write_percentage) {
}
}
- template <typename WriterFunc>
- static void FollyRcuThreading(WriterFunc writer_func) {
+ // Drives one RCU benchmark: one writer thread running
+ // writer_func(s_nRcuWriterPassCount) plus s_nThreadCount - 1 reader
+ // threads running reader_func(s_nRcuReaderPassCount). The *_nonstop
+ // variants ignore their pass-count argument and instead poll the
+ // rcu_readers_num / rcu_writers_num counters initialized here.
+ template <typename WriterFunc, typename ReaderFunc>
+ static void FollyRcuThreading(WriterFunc writer_func,
+                               ReaderFunc reader_func) {
    rcu_readers_num.store(s_nThreadCount - 1, std::memory_order_release);
+   // Exactly one writer thread (threads[0]).
+   rcu_writers_num.store(1, std::memory_order_release);
    std::unique_ptr<std::thread[]> threads(new std::thread[s_nThreadCount]);
    // One of the threads is a writer.
-   threads[0] = std::thread(writer_func);
+   threads[0] = std::thread(writer_func, s_nRcuWriterPassCount);
    for (size_t i = 1; i < s_nThreadCount; i++) {
-     threads[i] = std::thread(run_rcu_reader, s_nRcuReaderPassCount);
+     threads[i] = std::thread(reader_func, s_nRcuReaderPassCount);
    }
    for (size_t i = 0; i < s_nThreadCount; i++) {
      threads[i].join();
+// Out-of-class definitions for FollySyncTest_Parallel's static members.
size_t FollySyncTest_Parallel::locked_data;
std::atomic<RcuData*> FollySyncTest_Parallel::rcu_data;
std::atomic_uint FollySyncTest_Parallel::rcu_readers_num;
+std::atomic_uint FollySyncTest_Parallel::rcu_writers_num;
size_t FollySyncTest_Parallel::s_nThreadCount;
size_t FollySyncTest_Parallel::s_nMicroLockPassCount;
size_t FollySyncTest_Parallel::s_nMicroSpinLockPassCount;
unsigned FollySyncTest_Parallel::s_nRWSpinLockWritePercentage;
unsigned FollySyncTest_Parallel::s_nRWTicketSpinLockWritePercentage;
+// Read-side benchmark: readers perform a fixed number of reads while the
+// writer loops with synchronize_rcu() until all readers finish.
-TEST_F(FollySyncTest_Parallel, FollyRCU_Sync) {
-  FollyRcuThreading(run_rcu_writer_sync);
+TEST_F(FollySyncTest_Parallel, FollyRCU_NonstopReaderSync) {
+  FollyRcuThreading(run_rcu_writer_sync_nonstop, run_rcu_reader_pass_count);
}
+// Same read-side benchmark, but the writer reclaims old copies with
+// rcu_retire() instead of blocking in synchronize_rcu().
-TEST_F(FollySyncTest_Parallel, FollyRCU_NoSync) {
-  FollyRcuThreading(run_rcu_writer_no_sync);
+TEST_F(FollySyncTest_Parallel, FollyRCU_NonstopReaderNoSync) {
+  FollyRcuThreading(run_rcu_writer_retire_nonstop, run_rcu_reader_pass_count);
}
TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_32) {