/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdlib.h>

#include <thread>
#include <vector>

#include <boost/optional.hpp>
#include <boost/thread/shared_mutex.hpp>

#include <folly/Benchmark.h>
#include <folly/MPMCQueue.h>
#include <folly/RWSpinLock.h>
#include <folly/Random.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/GTest.h>
#include <folly/test/DeterministicSchedule.h>
using namespace folly;
using namespace folly::test;

// Write-priority SharedMutex specialized on DeterministicAtomic so the
// deterministic scheduler can explore thread interleavings in tests.
typedef SharedMutexImpl<false, void, DeterministicAtomic, true>
    DSharedMutexWritePriority;
// Exercises the holder types of a SharedMutex-like lock: exclusive
// acquisition, move of ownership between holders, and the
// write -> upgrade -> write -> read transitions. At each step we probe the
// lock's externally visible state with try_lock / try_lock_shared.
template <typename Lock>
void runBasicTest() {
  Lock lock;
  SharedMutexToken token;

  {
    // create an exclusive write lock via holder
    typename Lock::WriteHolder holder(lock);
    EXPECT_FALSE(lock.try_lock());
    EXPECT_FALSE(lock.try_lock_shared(token));

    // move ownership to another write holder via move constructor
    typename Lock::WriteHolder holder2(std::move(holder));
    EXPECT_FALSE(lock.try_lock());
    EXPECT_FALSE(lock.try_lock_shared(token));

    // move ownership to another write holder via assign operator
    typename Lock::WriteHolder holder3;
    holder3 = std::move(holder2);
    EXPECT_FALSE(lock.try_lock());
    EXPECT_FALSE(lock.try_lock_shared(token));

    // downgrade from exclusive to upgrade lock via move constructor
    typename Lock::UpgradeHolder holder4(std::move(holder3));

    // ensure we can lock from a shared source
    EXPECT_FALSE(lock.try_lock());
    EXPECT_TRUE(lock.try_lock_shared(token));
    lock.unlock_shared(token);

    // promote from upgrade to exclusive lock via move constructor
    typename Lock::WriteHolder holder5(std::move(holder4));
    EXPECT_FALSE(lock.try_lock());
    EXPECT_FALSE(lock.try_lock_shared(token));

    // downgrade exclusive to shared lock via move constructor
    typename Lock::ReadHolder holder6(std::move(holder5));

    // ensure we can lock from another shared source
    EXPECT_FALSE(lock.try_lock());
    EXPECT_TRUE(lock.try_lock_shared(token));
    lock.unlock_shared(token);
  }
}
-static void folly_rwspin_reads(uint numOps,
+static void folly_rwspin_reads(uint32_t numOps,
size_t numThreads,
bool useSeparateLocks) {
runContendedReaders<atomic, RWSpinLock, Locker>(
numOps, numThreads, useSeparateLocks);
}
-static void shmtx_wr_pri_reads(uint numOps,
+static void shmtx_wr_pri_reads(uint32_t numOps,
size_t numThreads,
bool useSeparateLocks) {
runContendedReaders<atomic, SharedMutexWritePriority, TokenLocker>(
numOps, numThreads, useSeparateLocks);
}
-static void shmtx_w_bare_reads(uint numOps,
+static void shmtx_w_bare_reads(uint32_t numOps,
size_t numThreads,
bool useSeparateLocks) {
runContendedReaders<atomic, SharedMutexWritePriority, Locker>(
numOps, numThreads, useSeparateLocks);
}
-static void shmtx_rd_pri_reads(uint numOps,
+static void shmtx_rd_pri_reads(uint32_t numOps,
size_t numThreads,
bool useSeparateLocks) {
runContendedReaders<atomic, SharedMutexReadPriority, TokenLocker>(
numOps, numThreads, useSeparateLocks);
}
-static void shmtx_r_bare_reads(uint numOps,
+static void shmtx_r_bare_reads(uint32_t numOps,
size_t numThreads,
bool useSeparateLocks) {
runContendedReaders<atomic, SharedMutexReadPriority, Locker>(
numOps, numThreads, useSeparateLocks);
}
-static void folly_ticket_reads(uint numOps,
+static void folly_ticket_reads(uint32_t numOps,
size_t numThreads,
bool useSeparateLocks) {
runContendedReaders<atomic, RWTicketSpinLock64, Locker>(
numOps, numThreads, useSeparateLocks);
}
-static void boost_shared_reads(uint numOps,
+static void boost_shared_reads(uint32_t numOps,
size_t numThreads,
bool useSeparateLocks) {
runContendedReaders<atomic, boost::shared_mutex, Locker>(
numOps, numThreads, useSeparateLocks);
}
-static void pthrd_rwlock_reads(uint numOps,
+static void pthrd_rwlock_reads(uint32_t numOps,
size_t numThreads,
bool useSeparateLocks) {
runContendedReaders<atomic, PosixRWLock, Locker>(
numOps, numThreads, writeFraction, useSeparateLocks);
}
-static void shmtx_wr_pri(uint numOps,
+static void shmtx_wr_pri(uint32_t numOps,
size_t numThreads,
double writeFraction,
bool useSeparateLocks) {
numOps, numThreads, writeFraction, useSeparateLocks);
}
-static void shmtx_w_bare(uint numOps,
+static void shmtx_w_bare(uint32_t numOps,
size_t numThreads,
double writeFraction,
bool useSeparateLocks) {
numOps, numThreads, writeFraction, useSeparateLocks);
}
-static void shmtx_rd_pri(uint numOps,
+static void shmtx_rd_pri(uint32_t numOps,
size_t numThreads,
double writeFraction,
bool useSeparateLocks) {
numOps, numThreads, writeFraction, useSeparateLocks);
}
-static void shmtx_r_bare(uint numOps,
+static void shmtx_r_bare(uint32_t numOps,
size_t numThreads,
double writeFraction,
bool useSeparateLocks) {
// Stress test: 10% writes under a read-priority SharedMutex, repeated
// across several passes to vary interleavings.
TEST(SharedMutex, mixed_mostly_read_read_prio) {
  for (int pass = 0; pass < 5; ++pass) {
    runMixed<atomic, SharedMutexReadPriority, TokenLocker>(
        10000, 32, 0.1, false);
  }
}
// Stress test: 10% writes under a write-priority SharedMutex, repeated
// across several passes to vary interleavings.
TEST(SharedMutex, mixed_mostly_read_write_prio) {
  for (int pass = 0; pass < 5; ++pass) {
    runMixed<atomic, SharedMutexWritePriority, TokenLocker>(
        10000, 32, 0.1, false);
  }
}
}
}
// Shorthands for the benchmark-registration macros below. The portable
// __VA_ARGS__ form (glued through FB_VA_GLUE) replaces the GCC-specific
// `args...` named-variadic extension.
#define BENCH_BASE(...) FB_VA_GLUE(BENCHMARK_NAMED_PARAM, (__VA_ARGS__))
#define BENCH_REL(...) FB_VA_GLUE(BENCHMARK_RELATIVE_NAMED_PARAM, (__VA_ARGS__))
// 100% reads. Best-case scenario for deferred locks. Lock is colocated
// with read data, so inline lock takes cache miss every time but deferred