Refactors folly sync test cases
author Peizhao Ou <peizhaoo@uci.edu>
Mon, 12 Feb 2018 20:19:33 +0000 (12:19 -0800)
committer Peizhao Ou <peizhaoo@uci.edu>
Mon, 12 Feb 2018 20:19:33 +0000 (12:19 -0800)
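
The parallel RCU writer threads now run until all reader threads have finished, signaled through a new rcu_readers_done flag, instead of running for a fixed pass count, so the reader side is what actually gets benchmarked. The sequential RCU tests now read, copy, and update a shared rcu_data pointer instead of retiring freshly allocated dummy objects, and a reader-only benchmark (FollyRCU_ReaderOnly) is added.

The writer/reader coordination used in FollyRcuThreading, reduced to a minimal standalone sketch. The sketch is illustrative only: a plain std::atomic counter stands in for the RCU-protected data, and the pass count and sleep interval are made-up values rather than the test's configured ones.

    #include <atomic>
    #include <chrono>
    #include <cstddef>
    #include <thread>
    #include <vector>

    std::atomic<long> shared_value{0};      // stand-in for the RCU-protected data
    std::atomic<bool> readers_done{false};

    // The writer keeps publishing updates until every reader has finished.
    void writer_loop(unsigned sleep_ms) {
      while (!readers_done.load(std::memory_order_acquire)) {
        shared_value.fetch_add(1, std::memory_order_relaxed);
        std::this_thread::sleep_for(std::chrono::milliseconds(sleep_ms));
      }
    }

    // Readers do a fixed number of passes; only the readers are benchmarked.
    void reader_loop(std::size_t pass_count) {
      long sum = 0;
      for (std::size_t i = 0; i < pass_count; ++i) {
        sum += shared_value.load(std::memory_order_relaxed);
      }
      (void)sum;  // simulate consuming the read data
    }

    int main() {
      std::thread writer(writer_loop, /*sleep_ms=*/10u);  // illustrative interval
      std::vector<std::thread> readers;
      for (int i = 0; i < 3; ++i) {
        readers.emplace_back(reader_loop, std::size_t{1000000});  // illustrative pass count
      }
      for (auto& t : readers) {
        t.join();
      }
      readers_done.store(true, std::memory_order_release);  // release the writer
      writer.join();
    }
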
folly/stress-test/stress-parallel-folly-sync.cpp
folly/stress-test/stress-sequential-folly-sync.cpp

index 36266e27f697e18d9e9f970b8fe07091dc85a087..addc92ce94b1fb8dfb5288baf7292b5624409694 100644 (file)
@@ -8,6 +8,10 @@ protected:
   // Simulate as the data protected by the lock.
   static size_t locked_data;
   static std::atomic<RcuData*> rcu_data;
+  // For RCU, we mostly want to benchmark the readers, since RCU is designed
+  // for very fast readers and occasional writers. We use a writer thread that
+  // runs nonstop until all other reader threads are done.
+  static std::atomic_bool rcu_readers_done;
   // MicroLock
   static size_t s_nMicroLockPassCount;
   // MicroSpinLock
@@ -50,23 +54,8 @@ protected:
     rcu_data.store(new RcuData(), std::memory_order_relaxed);
   }
 
-  static void run_rcu_sync(size_t pass_count, unsigned write_percentage) {
-    for (size_t count = 0; count < pass_count; count++) {
-      if (rand(100) < write_percentage) {
-        auto *old_data = rcu_data.load(std::memory_order_relaxed);
-        auto *new_data = new RcuData();
-        rcu_data.store(new_data, std::memory_order_relaxed);
-        folly::rcu_retire(old_data);
-      } else {
-        folly::rcu_reader g;
-      }
-    }
-  }
-
-  // writer_freq is the milliseconds a writer should wait before another writer
-  // happens.
-  static void run_rcu_writer_sync(size_t pass_count, unsigned writer_freq) {
-    for (size_t count = 0; count < pass_count; count++) {
+  static void run_rcu_writer_sync() {
+    while (!rcu_readers_done.load(std::memory_order_acquire)) {
       auto *old_data = rcu_data.load(std::memory_order_relaxed);
       auto *new_data = new RcuData(*old_data);
       new_data->d1++;
@@ -74,21 +63,21 @@ protected:
       rcu_data.store(new_data, std::memory_order_relaxed);
       folly::synchronize_rcu();
       delete old_data;
-      std::this_thread::sleep_for(std::chrono::milliseconds(writer_freq));
+      std::this_thread::sleep_for(
+          std::chrono::milliseconds(s_nRcuWriterFrequency));
     }
   }
 
-  // writer_freq is the milliseconds a writer should wait before another writer
-  // happens.
-  static void run_rcu_writer_no_sync(size_t pass_count, unsigned writer_freq) {
-    for (size_t count = 0; count < pass_count; count++) {
+  static void run_rcu_writer_no_sync() {
+    while (!rcu_readers_done.load(std::memory_order_acquire)) {
       auto *old_data = rcu_data.load(std::memory_order_relaxed);
       auto *new_data = new RcuData(*old_data);
       new_data->d1++;
       new_data->d2++;
       rcu_data.store(new_data, std::memory_order_relaxed);
       folly::rcu_retire(old_data);
-      std::this_thread::sleep_for(std::chrono::milliseconds(writer_freq));
+      std::this_thread::sleep_for(
+          std::chrono::milliseconds(s_nRcuWriterFrequency));
     }
   }
 
@@ -99,6 +88,7 @@ protected:
       auto *data = rcu_data.load(std::memory_order_relaxed);
       sum += (data->d1 + data->d2);
     }
+    std::cout << "Reader done" << std::endl;
     // Just want to simulate the reading.
     EXPECT_GT(sum, 0);
   }
@@ -145,16 +135,17 @@ protected:
   static void FollyRcuThreading(WriterFunc writer_func) {
     // One of the threads is a writer.
     size_t reader_thrd_cnt = s_nThreadCount - 1;
+    rcu_readers_done.store(false, std::memory_order_release);
     std::unique_ptr<std::thread[]> reader_threads(
         new std::thread[reader_thrd_cnt]);
-    std::thread writer_thread(writer_func, s_nRcuWriterPassCount,
-                              s_nRcuWriterFrequency);
+    std::thread writer_thread(writer_func);
     for (size_t i = 0; i < reader_thrd_cnt; i++) {
       reader_threads[i] = std::thread(run_rcu_reader, s_nRcuReaderPassCount);
     }
     for (size_t i = 0; i < reader_thrd_cnt; i++) {
       reader_threads[i].join();
     }
+    rcu_readers_done.store(true, std::memory_order_release);
     writer_thread.join();
   }
 
@@ -178,6 +169,7 @@ protected:
 
 size_t FollySyncTest_Parallel::locked_data;
 std::atomic<RcuData*> FollySyncTest_Parallel::rcu_data;
+std::atomic_bool FollySyncTest_Parallel::rcu_readers_done;
 size_t FollySyncTest_Parallel::s_nThreadCount;
 size_t FollySyncTest_Parallel::s_nMicroLockPassCount;
 size_t FollySyncTest_Parallel::s_nMicroSpinLockPassCount;
index 8a7a1d1fce2843996ed0c7fcde8de61459534157..9622230407976c2653e6dfbad690c6574df0acd2 100644 (file)
@@ -6,6 +6,7 @@ class FollySyncTest_Sequential: public cds_test::stress_fixture {
 protected:
   // Simulate as the data protected by the lock.
   static size_t locked_data;
+  static std::atomic<RcuData*> rcu_data;
   // MicroLock
   static size_t s_nMicroLockPassCount;
   // MicroSpinLock
@@ -21,6 +22,7 @@ protected:
   // RCU
   static size_t s_nRcuSyncPassCount;
   static size_t s_nRcuNoSyncPassCount;
+  static size_t s_nRcuReaderOnlyPassCount;
 
   static void SetUpTestCase() {
     const cds_test::config& cfg = get_config("SequentialFollySync");
@@ -32,36 +34,66 @@ protected:
     GetConfigNonZeroExpected(RWTicketSpinLockPassCount, 5000000);
     GetConfigNonZeroExpected(RcuSyncPassCount, 180000);
     GetConfigNonZeroExpected(RcuNoSyncPassCount, 3500000);
+    GetConfigNonZeroExpected(RcuReaderOnlyPassCount, 3000000);
+
+    // Initialize the RCU protected data.
+    rcu_data.store(new RcuData(), std::memory_order_relaxed);
+  }
+
+  static void run_rcu_reader_only(size_t pass_count) {
+    size_t sum = 1;
+    for (size_t count = 0; count < pass_count; count++) {
+      folly::rcu_reader g;
+      auto *data = rcu_data.load(std::memory_order_relaxed);
+      sum += (data->d1 + data->d2);
+    }
+    EXPECT_EQ(sum, 1);
   }
 
   static void run_rcu_sync(size_t pass_count) {
     for (int write_percentage = 1; write_percentage <= 5; write_percentage += 1) {
+      size_t sum = 0;
       for (size_t count = 0; count < pass_count; count++) {
         for (int i = 0; i < 100; ++i) {
           if (i < write_percentage) {
-            RcuData* data = new RcuData();
-            folly::rcu_retire(data);
+            auto* old_data = rcu_data.load(std::memory_order_relaxed);
+            auto* new_data = new RcuData(*old_data);
+            new_data->d1++;
+            new_data->d2++;
+            rcu_data.store(new_data, std::memory_order_relaxed);
             folly::synchronize_rcu();
+            delete old_data;
           } else {
             folly::rcu_reader g;
+            auto* data = rcu_data.load(std::memory_order_relaxed);
+            sum += (data->d1 + data->d2);
           }
         }
       }
+      EXPECT_GT(sum, 0);
     }
   }
 
   static void run_rcu_no_sync(size_t pass_count) {
     for (int write_percentage = 1; write_percentage <= 5; write_percentage += 1) {
+      size_t sum = 0;
       for (size_t count = 0; count < pass_count; count++) {
         for (int i = 0; i < 100; ++i) {
           if (i < write_percentage) {
-            RcuData* data = new RcuData();
-            folly::rcu_retire(data);
+            auto* old_data = rcu_data.load(std::memory_order_relaxed);
+            auto* new_data = new RcuData(*old_data);
+            new_data->d1++;
+            new_data->d2++;
+            rcu_data.store(new_data, std::memory_order_relaxed);
+            folly::rcu_retire(old_data);
           } else {
             folly::rcu_reader g;
+            auto* data = rcu_data.load(std::memory_order_relaxed);
+            sum += (data->d1 + data->d2);
           }
         }
       }
+      EXPECT_GT(sum, 0);
     }
   }
 
@@ -106,6 +138,7 @@ protected:
 };
 
 size_t FollySyncTest_Sequential::locked_data;
+std::atomic<RcuData*> FollySyncTest_Sequential::rcu_data;
 size_t FollySyncTest_Sequential::s_nMicroLockPassCount;
 size_t FollySyncTest_Sequential::s_nMicroSpinLockPassCount;
 size_t FollySyncTest_Sequential::s_nPicoSpinLockPassCount;
@@ -114,6 +147,19 @@ size_t FollySyncTest_Sequential::s_nRWSpinLockPassCount;
 size_t FollySyncTest_Sequential::s_nRWTicketSpinLockPassCount;
 size_t FollySyncTest_Sequential::s_nRcuSyncPassCount;
 size_t FollySyncTest_Sequential::s_nRcuNoSyncPassCount;
+size_t FollySyncTest_Sequential::s_nRcuReaderOnlyPassCount;
+
+TEST_F(FollySyncTest_Sequential, FollyRCU_ReaderOnly) {
+  run_rcu_reader_only(s_nRcuReaderOnlyPassCount);
+}
+
+TEST_F(FollySyncTest_Sequential, FollyRCU_Sync) {
+  run_rcu_sync(s_nRcuSyncPassCount);
+}
+
+TEST_F(FollySyncTest_Sequential, FollyRCU_NoSync) {
+  run_rcu_no_sync(s_nRcuNoSyncPassCount);
+}
 
 TEST_F(FollySyncTest_Sequential, FollyMicroSpinLock) {
   run_small_lock<MicroSpinLock>(s_nMicroSpinLockPassCount);
@@ -127,14 +173,6 @@ TEST_F(FollySyncTest_Sequential, FollyMicroLock) {
   run_small_lock<MicroLock>(s_nMicroLockPassCount);
 }
 
-TEST_F(FollySyncTest_Sequential, FollyRCU_Sync) {
-  run_rcu_sync(s_nRcuSyncPassCount);
-}
-
-TEST_F(FollySyncTest_Sequential, FollyRCU_NoSync) {
-  run_rcu_no_sync(s_nRcuNoSyncPassCount);
-}
-
 TEST_F(FollySyncTest_Sequential, FollyRWTicketSpinLock_32) {
   run_rw_lock<RWTicketSpinLock32>(s_nRWTicketSpinLockPassCount);
 }