--- /dev/null
+#ifndef _CHASE_LEV_DEQUE_H
+#define _CHASE_LEV_DEQUE_H
+
+#include <atomic>
+#include <cds/sync/backoff.h>
+#include <cstdlib>
+#include <inttypes.h>
+#include <iostream>
+
+namespace cds_others {
+
+#define EMPTY 0xffffffff
+#define ABORT 0xfffffffe
+
+using std::memory_order_seq_cst;
+using std::memory_order_release;
+using std::memory_order_acquire;
+using std::memory_order_acq_rel;
+using std::memory_order_relaxed;
+using std::atomic_int;
+using std::atomic_size_t;
+using std::atomic_uintptr_t;
+using std::size_t;
+
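+// Chase-Lev work-stealing deque (dynamic circular array), following the C11
+// implementation of Le et al., "Correct and Efficient Work-Stealing for Weak
+// Memory Models" (PPoPP 2013). The owner thread uses push()/take() on the
+// bottom end; any other thread may steal() from the top end. take() returns
+// EMPTY when the deque is empty; steal() returns EMPTY or ABORT (lost race).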
+class ChaseLevDeque {
+private:
+ atomic_size_t top;
+ atomic_size_t bottom;
+ atomic_uintptr_t array; /* Atomic(Array *) */
+
+public:
+ struct Array {
+ atomic_size_t size;
+ atomic_int buffer[];
+ };
+
+ ChaseLevDeque() {
+ Array *a = (Array *)calloc(1, sizeof(Array) + 2 * sizeof(atomic_int));
+ array.store((uintptr_t)a, memory_order_relaxed);
+ top.store(0, memory_order_relaxed);
+ bottom.store(0, memory_order_relaxed);
+ a->size.store(2, memory_order_relaxed);
+ }
+
+ int take() {
+ size_t b = bottom.load(memory_order_relaxed) - 1;
+ Array *a = (Array *)array.load(memory_order_relaxed);
+ bottom.store(b, memory_order_relaxed);
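+    // The seq_cst fence orders the bottom decrement before the load of top,
+    // pairing with the fence in steal() so the owner and a thief cannot both
+    // claim the same last element.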
+ atomic_thread_fence(memory_order_seq_cst);
+ size_t t = top.load(memory_order_relaxed);
+ int x;
+ if (t <= b) {
+ /* Non-empty queue. */
+ x = a->buffer[b % a->size.load(memory_order_relaxed)].load(
+ memory_order_relaxed);
+ if (t == b) {
+ /* Single last element in queue. */
+ if (!top.compare_exchange_strong(t, t + 1, memory_order_seq_cst,
+ memory_order_relaxed))
+ /* Failed race. */
+ x = EMPTY;
+ bottom.store(b + 1, memory_order_relaxed);
+ }
+ } else { /* Empty queue. */
+ x = EMPTY;
+ bottom.store(b + 1, memory_order_relaxed);
+ }
+ return x;
+ }
+
+ void resize() {
+ Array *a = (Array *)array.load(memory_order_relaxed);
+ size_t size = a->size.load(memory_order_relaxed);
+ size_t new_size = size << 1;
+ Array *new_a =
+ (Array *)calloc(1, new_size * sizeof(atomic_int) + sizeof(Array));
+ size_t t = top.load(memory_order_relaxed);
+ size_t b = bottom.load(memory_order_relaxed);
+ new_a->size.store(new_size, memory_order_relaxed);
+ size_t i;
+ for (i = t; i < b; i++) {
+ new_a->buffer[i % new_size].store(
+ a->buffer[i % size].load(memory_order_relaxed), memory_order_relaxed);
+ }
+ array.store((uintptr_t)new_a, memory_order_release);
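+    // The old array is intentionally not freed: a concurrent steal() may still
+    // be reading from it. A production version would defer reclamation (e.g.
+    // via hazard pointers or epochs).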
+ // std::cout << "Resize to " << new_size << "\n";
+ }
+
+ void push(int x) {
+ size_t b = bottom.load(memory_order_relaxed);
+ size_t t = top.load(memory_order_acquire);
+ Array *a = (Array *)array.load(memory_order_relaxed);
+ if (b - t > a->size.load(memory_order_relaxed) - 1) /* Full queue. */ {
+ resize();
+      // The paper's listing omits the following reload: after resize() the
+      // array pointer must be re-read, or we would store into the old buffer.
+ a = (Array *)array.load(memory_order_relaxed);
+ }
+ a->buffer[b % a->size.load(memory_order_relaxed)].store(
+ x, memory_order_relaxed);
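+    // The release fence makes the buffer store above visible to any thief
+    // that observes the incremented bottom.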
+ atomic_thread_fence(memory_order_release);
+ bottom.store(b + 1, memory_order_relaxed);
+ }
+
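+  // Called by thief threads: returns the stolen element, EMPTY if the deque
+  // looks empty, or ABORT if the CAS on top lost a race.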
+ int steal() {
+ size_t t = top.load(memory_order_acquire);
+ atomic_thread_fence(memory_order_seq_cst);
+ size_t b = bottom.load(memory_order_acquire);
+ int x = EMPTY;
+ if (t < b) {
+ /* Non-empty queue. */
+ Array *a = (Array *)array.load(memory_order_acquire);
+ x = a->buffer[t % a->size.load(memory_order_relaxed)].load(
+ memory_order_relaxed);
+ if (!top.compare_exchange_strong(t, t + 1, memory_order_seq_cst,
+ memory_order_relaxed))
+ /* Failed race. */
+ return ABORT;
+ }
+ return x;
+ }
+};
+
+} // namespace cds_others
+
+#endif
#ifndef _SEQLOCK_H
#define _SEQLOCK_H
+#include "backoff.h"
#include <atomic>
namespace cds_others {
}
int read() {
+ ExpBackoff backoff;
while (true) {
- int old_seq = seq_.load(memory_order_acquire); // acquire
- if (old_seq % 2 == 1)
+ int old_seq = seq_.load(memory_order_acquire);
+ if (old_seq % 2 == 1) {
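+      // A writer is active (odd sequence number): back off and retry.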
+ backoff();
continue;
+ }
- int res = data_.load(memory_order_acquire); // acquire
- if (seq_.load(memory_order_relaxed) == old_seq) { // relaxed
+ int res = data_.load(memory_order_acquire);
+ if (seq_.load(memory_order_relaxed) == old_seq) {
return res;
}
}
}
void write(int new_data) {
+ ExpBackoff backoff;
while (true) {
    // This load could likely be relaxed as well.
- int old_seq = seq_.load(memory_order_acquire); // acquire
- if (old_seq % 2 == 1)
+ int old_seq = seq_.load(memory_order_acquire);
+ if (old_seq % 2 == 1) {
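+      // Another writer is in progress (odd sequence): back off before retrying.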
+ backoff();
continue; // Retry
+ }
    // Relaxed ordering should suffice for this CAS.
if (seq_.compare_exchange_strong(old_seq, old_seq + 1,
memory_order_relaxed,
- memory_order_relaxed)) // relaxed
+ memory_order_relaxed)) {
break;
+ }
}
// Update the data
--- /dev/null
+#ifndef _TICKET_LOCK_H
+#define _TICKET_LOCK_H
+
+#include "backoff.h"
+#include <atomic>
+
+namespace cds_others {
+
+class TicketLock {
+  /**
+    This ticket lock implementation is derived from the ticket lock described
+    in Mellor-Crummey & Scott's 1991 paper "Algorithms for Scalable
+    Synchronization on Shared-Memory Multiprocessors". It assumes that the
+    ticket and turn counters are wide enough to accommodate the maximum number
+    of simultaneous requests for the lock.
+  */
+public:
+ TicketLock() {
+ ticket.store(0, std::memory_order_relaxed);
+ turn.store(0, std::memory_order_relaxed);
+ }
+
+ void lock() {
+ // First grab a ticket
+ unsigned my_ticket = ticket.fetch_add(1, std::memory_order_relaxed);
+ // Spinning for my turn
+ ExpBackoff backoff;
+ while (true) {
+ unsigned my_turn = turn.load(std::memory_order_acquire);
+ if (my_turn == my_ticket) {
+ // Now it's my turn
+ return;
+ } else {
+ backoff();
+ }
+ }
+ }
+
+ void unlock() {
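+    // Only the lock holder updates turn, so a relaxed load is race-free here;
+    // the release store hands the critical section to the next waiter's
+    // acquire load in lock().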
+ unsigned my_turn = turn.load(std::memory_order_relaxed);
+ turn.store(my_turn + 1, std::memory_order_release);
+ }
+
+private:
+ std::atomic_uint ticket;
+ std::atomic_uint turn;
+};
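+
+// Minimal usage sketch (illustrative only):
+//
+//   cds_others::TicketLock lock;
+//   int shared_counter = 0;
+//   // in each thread:
+//   lock.lock();
+//   ++shared_counter;  // critical section
+//   lock.unlock();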
+
+} // namespace cds_others
+
+#endif
set(CDSSTRESS_STACK_SOURCES
../main.cpp
+ spinlock_driver.cpp
+ deque_driver.cpp
barrier_driver.cpp
seqlock_driver.cpp
rwlock_driver.cpp
mcslock_driver.cpp
- spinlock_driver.cpp
)
include_directories(
+#include "common.h"
#include <atomic>
#include <cds/gc/dhp.h>
#include <cds/gc/hp.h>
typedef cds_others::SpinBarrier Barrier;
+static size_t s_nBarrierThreadCount = 6;
+static size_t s_nBarrierPassCount = 100000000;
+
class BarrierTest : public cds_test::stress_fixture {
protected:
static Barrier *barrier;
static int count;
- static const int kThreads = 6;
- static const int kPassCount = 10000;
- static void SetUpTestCase() {}
+ static void SetUpTestCase() {
+ cds_test::config const &cfg = get_config("Misc");
+ GetConfig(BarrierPassCount);
+ GetConfig(BarrierThreadCount);
+ }
static void TearDownTestCase() {
- assert (count == kPassCount*kPassCount);
+    if (count != static_cast<int>(s_nBarrierPassCount)) {
+      cout << "Incorrect barrier count: " << count
+           << ", expected " << s_nBarrierPassCount << endl;
+    }
}
static void Thread() {
- for (int i = 0; i < kPassCount; i++) {
- for (int j = 0; j < kPassCount; j++) {
- if (barrier->wait()) {
- count++;
- }
+ for (ullong i = 0; i < s_nBarrierPassCount; i++) {
+ if (barrier->wait()) {
+ count++;
}
}
}
Barrier *BarrierTest::barrier;
int BarrierTest::count;
-const int BarrierTest::kThreads;
TEST_F(BarrierTest, Wait) {
- barrier = new Barrier(kThreads);
- int num_threads = kThreads;
- std::thread *threads = new std::thread[num_threads];
- for (int i = 0; i < num_threads; i++) {
+ barrier = new Barrier(s_nBarrierThreadCount);
+ std::thread *threads = new std::thread[s_nBarrierThreadCount];
+  for (size_t i = 0; i < s_nBarrierThreadCount; i++) {
threads[i] = std::thread(Thread);
}
- for (int i = 0; i < num_threads; i++) {
+  for (size_t i = 0; i < s_nBarrierThreadCount; i++) {
threads[i].join();
}
}
--- /dev/null
+#ifndef _COMMON_H
+#define _COMMON_H
+
+#include <cds_test/stress_test.h>
+
+typedef unsigned long long ullong;
+
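+// GetConfig(Foo) reads option "Foo" from the test configuration and stores it
+// in s_nFoo, keeping the current value as the default when the option is absent.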
+#define GetConfig(field) s_n##field = cfg.get_size_t(#field, s_n##field)
+
+#endif
--- /dev/null
+#include "common.h"
+#include <cds/container/chase-lev-deque.h>
+#include <cds_test/stress_test.h>
+#include <cstdlib>
+#include <ctime>
+#include <iostream>
+#include <thread>
+
+using namespace std;
+
+namespace {
+
+typedef cds_others::ChaseLevDeque Deque;
+static size_t s_nDequeStealerThreadCount = 5;
+static size_t s_nDequeMainPassCount = 100000000;
+
+class ChaseLevDequeTest : public cds_test::stress_fixture {
+protected:
+ static Deque *deque;
+ static atomic_int terminate_stealer;
+ static ullong *sums;
+ static ullong *succ_counts;
+ static ullong push_sum;
+ static ullong push_count;
+
+ static void SetUpTestCase() {
+ cds_test::config const &cfg = get_config("Misc");
+ GetConfig(DequeStealerThreadCount);
+ GetConfig(DequeMainPassCount);
+ }
+
+ static void StealerThread(int index) {
+ while (!terminate_stealer.load(memory_order_relaxed)) {
+ int res = deque->steal();
+ if (res != EMPTY && res != ABORT) {
+ sums[index] += res;
+ succ_counts[index]++;
+ }
+ }
+ }
+
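+  // Owner thread: with probability push_percentage/100 push a random item
+  // (recording push_sum/push_count), otherwise take() one and record it in
+  // sums/succ_counts at slot `index`.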
+ static void MainThread(int index, int push_percentage) {
+ for (ullong i = 0; i < s_nDequeMainPassCount; i++) {
+ if ((::rand() % 100) < push_percentage) {
+ int item = ::rand() % 100;
+ deque->push(item);
+ push_sum += item;
+ push_count++;
+ } else {
+ int res = deque->take();
+ if (res != EMPTY) {
+ sums[index] += res;
+ succ_counts[index]++;
+ }
+ }
+ }
+ // while (true) {
+ // int res = deque->take();
+ // if (res != EMPTY) {
+ // sums[index] += res;
+ // succ_counts[index]++;
+ // } else {
+ // break;
+ // }
+ // }
+ }
+};
+
+atomic_int ChaseLevDequeTest::terminate_stealer;
+ullong *ChaseLevDequeTest::sums;
+ullong *ChaseLevDequeTest::succ_counts;
+ullong ChaseLevDequeTest::push_count;
+ullong ChaseLevDequeTest::push_sum;
+Deque *ChaseLevDequeTest::deque;
+
+TEST_F(ChaseLevDequeTest, DequePushPopTake) {
+ deque = new Deque();
+ push_sum = 0;
+ sums = (ullong *)calloc(1, sizeof(ullong) * (s_nDequeStealerThreadCount + 1));
+ succ_counts =
+ (ullong *)calloc(1, sizeof(ullong) * (s_nDequeStealerThreadCount + 1));
+ srand(time(NULL));
+
+ // Stealer threads
+ std::thread *threads = new std::thread[s_nDequeStealerThreadCount];
+ for (ullong i = 0; i < s_nDequeStealerThreadCount; i++) {
+ threads[i] = std::thread(StealerThread, i);
+ }
+
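+  // Run the owner loop with decreasing push percentages: 90%, 80%, ..., 10%.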
+ for (int i = 90; i > 0; i -= 10) {
+ MainThread(s_nDequeStealerThreadCount, i);
+ }
+
+ terminate_stealer.store(1, memory_order_relaxed);
+ for (ullong i = 0; i < s_nDequeStealerThreadCount; i++) {
+ threads[i].join();
+ }
+
+ // Result analysis
+ ullong received_sum = 0;
+ ullong overall_count = 0;
+ for (ullong i = 0; i <= s_nDequeStealerThreadCount; i++) {
+ received_sum += sums[i];
+ overall_count += succ_counts[i];
+ }
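+  // Note: received_sum may be smaller than push_sum because elements still in
+  // the deque when the stealers are stopped are never taken (the drain loop in
+  // MainThread is commented out).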
+ cout << "Sum of push: " << push_sum << "\n";
+  cout << "Received sum: " << received_sum << "\n";
+  cout << "Overall count: " << overall_count << "\n";
+  cout << "Push count: " << push_count << "\n";
+}
+
+} // namespace
+#include "common.h"
#include <atomic>
#include <cds/gc/dhp.h>
#include <cds/gc/hp.h>
namespace {
+static size_t s_nMCSLockThreadCount = 6;
+static size_t s_nMCSLockPassCount = 3000000;
+
class MCSLockTest : public cds_test::stress_fixture {
protected:
- static int x;
+ static ullong x;
static cds_others::mcs_mutex *my_mutex;
- static const int kThreads = 6;
- static void SetUpTestCase() {}
+ static void SetUpTestCase() {
+ cds_test::config const &cfg = get_config("Misc");
+ GetConfig(MCSLockPassCount);
+ GetConfig(MCSLockThreadCount);
+ }
static void Thread() {
cds_others::mcs_mutex::guard g(my_mutex);
x = 1;
my_mutex->unlock(&g);
- for (int i = 0; i < 10000; i++) {
- for (int j = 0; j < 300; j++) {
- my_mutex->lock(&g);
- x = i + j;
- my_mutex->unlock(&g);
- }
+ for (ullong i = 0; i < s_nMCSLockPassCount; i++) {
+ my_mutex->lock(&g);
+ x = i;
+ my_mutex->unlock(&g);
}
my_mutex->lock(&g);
}
};
-int MCSLockTest::x;
+ullong MCSLockTest::x;
cds_others::mcs_mutex *MCSLockTest::my_mutex;
-const int MCSLockTest::kThreads;
TEST_F(MCSLockTest, BasicLockUnlock) {
my_mutex = new cds_others::mcs_mutex();
- std::thread threads[kThreads];
- for (int i = 0; i < kThreads; i++) {
+ std::thread *threads = new std::thread[s_nMCSLockThreadCount];
+  for (size_t i = 0; i < s_nMCSLockThreadCount; i++) {
threads[i] = std::thread(Thread);
}
- for (int i = 0; i < kThreads; i++) {
+  for (size_t i = 0; i < s_nMCSLockThreadCount; i++) {
threads[i].join();
}
}
+#include "common.h"
#include <atomic>
#include <cds/gc/dhp.h>
#include <cds/gc/hp.h>
namespace {
+static size_t s_nRWLockThreadCount = 6;
+static size_t s_nRWLockPassCount = 200000;
+
typedef cds_others::RWLock RWLock;
class RWLockTest : public cds_test::stress_fixture {
protected:
static int sum;
static int x;
static RWLock *rwlock;
- static const int kReaderThreads = 0;
- static const int kWriterThreads = 0;
- static const int kReaderWriterThreads = 6;
- static const int kWriterPercentage = 20;
- static const int kRWPassCount = 20000;
- static void SetUpTestCase() {}
+ static void SetUpTestCase() {
+ cds_test::config const &cfg = get_config("Misc");
+ GetConfig(RWLockThreadCount);
+ GetConfig(RWLockPassCount);
+ }
- static void ReaderThread() {
- for (int i = 0; i < 10000; i++) {
- for (int j = 0; j < 10; i++) {
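+  // Each pass performs either a read-side critical section (sum += x) or a
+  // write-side increment of x, using trylock when the lock looks available;
+  // the read/write mix is driven by write_percentage.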
+ static void ReaderWriterThread(int write_percentage) {
+ for (size_t i = 0; i < s_nRWLockPassCount; i++) {
+ if (rand(100) < write_percentage) {
+        if (rwlock->read_can_lock()) {
+          if (!rwlock->read_trylock()) {
+            rwlock->read_lock();
+          }
+          sum += x;
+          rwlock->read_unlock();
+        } else {
+          rwlock->read_lock();
+          sum += x;
+          rwlock->read_unlock();
+        }
- }
- }
- }
-
- static void WriterThread() {
- for (int i = 0; i < 10000; i++) {
- if (rwlock->write_can_lock()) {
- if (!rwlock->write_trylock()) {
- rwlock->write_lock();
- }
- x += 1;
- rwlock->write_unlock();
} else {
- rwlock->write_lock();
- x += 1;
- rwlock->write_unlock();
- }
- }
- }
-
- static void ReaderWriterThread() {
- for (int i = 0; i < kRWPassCount; i++) {
- for (int j = 0; j < kRWPassCount; j++) {
- if (rand(100) < kWriterPercentage) {
- if (rwlock->read_can_lock()) {
- if (!rwlock->read_trylock()) {
- rwlock->read_lock();
- }
- sum += x;
- rwlock->read_unlock();
- } else {
- rwlock->read_lock();
- sum += x;
- rwlock->read_unlock();
- }
- } else {
- if (rwlock->write_can_lock()) {
- if (!rwlock->write_trylock()) {
- rwlock->write_lock();
- }
- x += 1;
- rwlock->write_unlock();
- } else {
+ if (rwlock->write_can_lock()) {
+ if (!rwlock->write_trylock()) {
rwlock->write_lock();
- x += 1;
- rwlock->write_unlock();
}
+          }
+          x += 1;
+          rwlock->write_unlock();
+        } else {
+          rwlock->write_lock();
+          x += 1;
+          rwlock->write_unlock();
+        }
}
}
int RWLockTest::x;
int RWLockTest::sum;
RWLock *RWLockTest::rwlock;
-const int RWLockTest::kReaderThreads;
-const int RWLockTest::kWriterThreads;
-const int RWLockTest::kReaderWriterThreads;
-const int RWLockTest::kRWPassCount;
TEST_F(RWLockTest, BasicLockUnlock) {
rwlock = new RWLock();
- int num_threads = kReaderThreads + kWriterThreads + kReaderWriterThreads;
- std::thread *threads = new std::thread[num_threads];
- for (int i = 0; i < kReaderThreads; i++) {
- threads[i] = std::thread(ReaderThread);
- }
- for (int i = kReaderThreads; i < (kReaderThreads + kWriterThreads); i++) {
- threads[i] = std::thread(WriterThread);
- }
- for (int i = (kReaderThreads + kWriterThreads); i < num_threads; i++) {
- threads[i] = std::thread(ReaderWriterThread);
- }
+  size_t num_threads = s_nRWLockThreadCount;
+ for (int write_percentage = 5; write_percentage < 50; write_percentage += 5) {
+ std::thread *threads = new std::thread[num_threads];
+ for (size_t i = 0; i < num_threads; i++) {
+ threads[i] = std::thread(ReaderWriterThread, write_percentage);
+ }
- for (int i = 0; i < num_threads; i++) {
- threads[i].join();
+    for (size_t i = 0; i < num_threads; i++) {
+      threads[i].join();
+    }
+    delete[] threads;
}
}
+#include "common.h"
#include <atomic>
#include <cds/gc/dhp.h>
#include <cds/gc/hp.h>
typedef cds_others::SeqLock SeqLock;
+static size_t s_nSeqLockReaderWriterThreadCount = 6;
+static size_t s_nSeqLockPassCount = 2000000;
+
class SeqLockTest : public cds_test::stress_fixture {
protected:
static int sum;
static SeqLock *seqlock;
static const int kReaderThreads = 0;
static const int kWriterThreads = 0;
- static const int kReaderWriterThreads = 6;
static const int kWriterPercentage = 15;
- static const int kPassCount = 15000;
- static void SetUpTestCase() {}
+ static void SetUpTestCase() {
+ cds_test::config const &cfg = get_config("Misc");
+ GetConfig(SeqLockReaderWriterThreadCount);
+ GetConfig(SeqLockPassCount);
+ }
static void ReaderThread() {}
static void WriterThread() {}
- static void ReaderWriterThread() {
- for (int i = 0; i < kPassCount; i++) {
- for (int j = 0; j < kPassCount; j++) {
- if (rand(100) < kWriterPercentage) {
- sum += seqlock->read();
- } else {
- seqlock->write(rand(10));
- }
+ static void ReaderWriterThread(int write_percentage) {
+    for (size_t i = 0; i < s_nSeqLockPassCount; i++) {
+ if (rand(100) < write_percentage) {
+ sum += seqlock->read();
+ } else {
+ seqlock->write(rand(10));
}
}
}
int SeqLockTest::sum;
SeqLock *SeqLockTest::seqlock;
-const int SeqLockTest::kReaderThreads;
-const int SeqLockTest::kWriterThreads;
-const int SeqLockTest::kReaderWriterThreads;
TEST_F(SeqLockTest, BasicReadWriter) {
seqlock = new SeqLock();
- int num_threads = kReaderThreads + kWriterThreads + kReaderWriterThreads;
- std::thread *threads = new std::thread[num_threads];
- for (int i = 0; i < kReaderThreads; i++) {
- threads[i] = std::thread(ReaderThread);
- }
- for (int i = kReaderThreads; i < (kReaderThreads + kWriterThreads); i++) {
- threads[i] = std::thread(WriterThread);
- }
- for (int i = (kReaderThreads + kWriterThreads); i < num_threads; i++) {
- threads[i] = std::thread(ReaderWriterThread);
- }
-
- for (int i = 0; i < num_threads; i++) {
- threads[i].join();
+ for (int write_percentage = 5; write_percentage < 50; write_percentage += 5) {
+ std::thread *threads = new std::thread[s_nSeqLockReaderWriterThreadCount];
+ for (size_t i = 0; i < s_nSeqLockReaderWriterThreadCount; i++) {
+ threads[i] = std::thread(ReaderWriterThread, write_percentage);
+ }
+    for (size_t i = 0; i < s_nSeqLockReaderWriterThreadCount; i++) {
+ threads[i].join();
+ }
+ delete[] threads;
}
}
+#include "common.h"
#include <atomic>
#include <cds/gc/dhp.h>
#include <cds/gc/hp.h>
#include <cds/sync/spinlock.h>
+#include <cds/sync/ticket_lock.h>
#include <cds_test/stress_test.h>
#include <iostream>
#include <thread>
using namespace std;
namespace {
+typedef cds_others::TicketLock TicketLock;
typedef cds::sync::spin SpinLock;
typedef cds::sync::reentrant_spin32 Reentrant32;
typedef cds::sync::reentrant_spin64 Reentrant64;
+static size_t s_nSpinLockThreadCount = 6;
+static size_t s_nSpinLockPassCount = 2500000000;
+static size_t s_nTicketLockPassCount = 4000000;
-#define TASK(lock_type, lock_ptr) \
- static void Thread ## lock_type() { \
- for (int i = 0; i < 100000; i++) { \
- for (int j = 0; j < 30000; j++) { \
- lock_ptr->lock(); \
- x = i + j; \
- lock_ptr->unlock(); \
- } \
- } \
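+// TASK(lock_type, lock_ptr, pass_cnt) defines Thread##lock_type(): a loop of
+// pass_cnt lock/unlock pairs around a write to the shared variable x.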
+#define TASK(lock_type, lock_ptr, pass_cnt)                                    \
+  static void Thread##lock_type() {                                            \
+    for (size_t i = 0; i < pass_cnt; i++) {                                    \
+      lock_ptr->lock();                                                        \
+      x = i;                                                                   \
+      lock_ptr->unlock();                                                      \
+    }                                                                          \
+  }
-#define LOCK_TEST(lock_type, lock_ptr) \
- TEST_F(SpinLockTest, lock_type) { \
- lock_ptr = new lock_type(); \
- std::thread threads[kThreads]; \
- for (int i = 0; i < kThreads; i++) { \
- threads[i] = std::thread(Thread ## lock_type); \
- } \
- for (int i = 0; i < kThreads; i++) { \
- threads[i].join(); \
- } \
-}
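+// LOCK_TEST(lock_type, lock_ptr) instantiates a stress test that runs
+// Thread##lock_type on s_nSpinLockThreadCount threads and joins them.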
+#define LOCK_TEST(lock_type, lock_ptr)                                         \
+  TEST_F(SpinLockTest, lock_type) {                                            \
+    lock_ptr = new lock_type();                                                \
+    std::thread *threads = new std::thread[s_nSpinLockThreadCount];            \
+    for (size_t i = 0; i < s_nSpinLockThreadCount; i++) {                      \
+      threads[i] = std::thread(Thread##lock_type);                             \
+    }                                                                          \
+    for (size_t i = 0; i < s_nSpinLockThreadCount; i++) {                      \
+      threads[i].join();                                                       \
+    }                                                                          \
+    delete[] threads;                                                          \
+  }
class SpinLockTest : public cds_test::stress_fixture {
protected:
static int x;
- static SpinLock* spin_mutex;
- static Reentrant32* reentrant_mutex32;
- static Reentrant64* reentrant_mutex64;
+ static TicketLock *ticket_mutex;
+ static SpinLock *spin_mutex;
+ static Reentrant32 *reentrant_mutex32;
+ static Reentrant64 *reentrant_mutex64;
- static const int kThreads = 6;
-
- static void SetUpTestCase() {}
+ static void SetUpTestCase() {
+ cds_test::config const &cfg = get_config("Misc");
+ GetConfig(SpinLockThreadCount);
+ GetConfig(SpinLockPassCount);
+ GetConfig(TicketLockPassCount);
+ }
- TASK(SpinLock, spin_mutex)
- TASK(Reentrant32, reentrant_mutex32)
- TASK(Reentrant64, reentrant_mutex64)
+ TASK(TicketLock, ticket_mutex, s_nTicketLockPassCount)
+ TASK(SpinLock, spin_mutex, s_nSpinLockPassCount)
+ TASK(Reentrant32, reentrant_mutex32, s_nSpinLockPassCount)
+ TASK(Reentrant64, reentrant_mutex64, s_nSpinLockPassCount)
};
int SpinLockTest::x;
-const int SpinLockTest::kThreads;
-SpinLock* SpinLockTest::spin_mutex;
-Reentrant32* SpinLockTest::reentrant_mutex32;
-Reentrant64* SpinLockTest::reentrant_mutex64;
+TicketLock *SpinLockTest::ticket_mutex;
+SpinLock *SpinLockTest::spin_mutex;
+Reentrant32 *SpinLockTest::reentrant_mutex32;
+Reentrant64 *SpinLockTest::reentrant_mutex64;
+LOCK_TEST(TicketLock, ticket_mutex)
LOCK_TEST(SpinLock, spin_mutex)
LOCK_TEST(Reentrant32, reentrant_mutex32)
LOCK_TEST(Reentrant64, reentrant_mutex64)