detail/MemoryIdler.h \
detail/MPMCPipelineDetail.h \
detail/SlowFingerprint.h \
+	detail/SpinLockImpl.h \
detail/Stats.h \
detail/ThreadLocalDetail.h \
detail/UncaughtExceptionCounter.h \
#pragma once
-#include <boost/noncopyable.hpp>
-#include <folly/Portability.h>
-
-// This is a wrapper SpinLock implementation that works around the
-// x64 limitation of the base folly MicroSpinLock. If that is available, this
-// simply thinly wraps it. Otherwise, it uses the simplest analog available on
-// iOS (or 32-bit Mac) or, failing that, POSIX (on Android et. al.)
-
-#if __x86_64__
-#include <folly/SmallLocks.h>
+#include <folly/detail/SpinLockImpl.h>
namespace folly {
-class SpinLock {
- public:
- FOLLY_ALWAYS_INLINE SpinLock() {
- lock_.init();
- }
- FOLLY_ALWAYS_INLINE void lock() const {
- lock_.lock();
- }
- FOLLY_ALWAYS_INLINE void unlock() const {
- lock_.unlock();
- }
- FOLLY_ALWAYS_INLINE bool trylock() const {
- return lock_.try_lock();
- }
- private:
- mutable folly::MicroSpinLock lock_;
-};
-
-}
-
+#if __x86_64__
+typedef SpinLockMslImpl SpinLock;
#elif __APPLE__
-#include <libkern/OSAtomic.h>
-
-namespace folly {
-
-class SpinLock {
- public:
- FOLLY_ALWAYS_INLINE SpinLock() : lock_(0) {}
- FOLLY_ALWAYS_INLINE void lock() const {
- OSSpinLockLock(&lock_);
- }
- FOLLY_ALWAYS_INLINE void unlock() const {
- OSSpinLockUnlock(&lock_);
- }
- FOLLY_ALWAYS_INLINE bool trylock() const {
- return OSSpinLockTry(&lock_);
- }
- private:
- mutable OSSpinLock lock_;
-};
-
-}
-
+typedef SpinLockAppleImpl SpinLock;
+#elif __ANDROID__
+typedef SpinLockPthreadMutexImpl SpinLock;
#else
-#include <pthread.h>
-#include <glog/logging.h>
-
-namespace folly {
-
-class SpinLock {
- public:
- FOLLY_ALWAYS_INLINE SpinLock() {
- pthread_mutex_init(&lock_, nullptr);
- }
- void lock() const {
- int rc = pthread_mutex_lock(&lock_);
- CHECK_EQ(0, rc);
- }
- FOLLY_ALWAYS_INLINE void unlock() const {
- int rc = pthread_mutex_unlock(&lock_);
- CHECK_EQ(0, rc);
- }
- FOLLY_ALWAYS_INLINE bool trylock() const {
- int rc = pthread_mutex_trylock(&lock_);
- CHECK_GE(rc, 0);
- return rc == 0;
- }
- private:
- mutable pthread_mutex_t lock_;
-};
-
-}
-
+typedef SpinLockPthreadImpl SpinLock;
#endif
-namespace folly {
-
-class SpinLockGuard : private boost::noncopyable {
+template <typename LOCK>
+class SpinLockGuardImpl : private boost::noncopyable {
public:
- FOLLY_ALWAYS_INLINE explicit SpinLockGuard(SpinLock& lock) :
+ FOLLY_ALWAYS_INLINE explicit SpinLockGuardImpl(LOCK& lock) :
lock_(lock) {
lock_.lock();
}
- FOLLY_ALWAYS_INLINE ~SpinLockGuard() {
+ FOLLY_ALWAYS_INLINE ~SpinLockGuardImpl() {
lock_.unlock();
}
private:
- SpinLock& lock_;
+ LOCK& lock_;
};
+typedef SpinLockGuardImpl<SpinLock> SpinLockGuard;
+
}
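
For reference, a minimal usage sketch of the API this header now exposes, assuming only what the diff above shows (folly::SpinLock, its lock()/unlock()/trylock() methods, and the SpinLockGuard typedef); the names counterLock, counter, and the two functions are illustrative, not part of the change:

#include <folly/SpinLock.h>
#include <cstdint>

folly::SpinLock counterLock;
uint64_t counter = 0;

void increment() {
  // SpinLockGuard locks in its constructor and unlocks in its destructor.
  folly::SpinLockGuard g(counterLock);
  counter++;
}

bool tryIncrement() {
  // trylock() never blocks; true means the lock was acquired and must
  // later be released with unlock().
  if (!counterLock.trylock()) {
    return false;
  }
  counter++;
  counterLock.unlock();
  return true;
}
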
--- /dev/null
+++ b/folly/detail/SpinLockImpl.h
+/*
+ * Copyright 2014 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+/*
+ * This file provides a few spin lock implementations, depending on the
+ * platform. folly/SpinLock.h will select one of these as the folly::SpinLock
+ * implementation.
+ *
+ * The main reason we keep these separated out here is so that we can run unit
+ * tests for all supported spin lock implementations, even though only one will
+ * be selected as the actual folly::SpinLock implementation for any given
+ * platform.
+ */
+
+#include <boost/noncopyable.hpp>
+#include <folly/Portability.h>
+
+#if __x86_64__
+#include <folly/SmallLocks.h>
+
+namespace folly {
+
+class SpinLockMslImpl {
+ public:
+ FOLLY_ALWAYS_INLINE SpinLockMslImpl() {
+ lock_.init();
+ }
+ FOLLY_ALWAYS_INLINE void lock() const {
+ lock_.lock();
+ }
+ FOLLY_ALWAYS_INLINE void unlock() const {
+ lock_.unlock();
+ }
+ FOLLY_ALWAYS_INLINE bool trylock() const {
+ return lock_.try_lock();
+ }
+ private:
+ mutable folly::MicroSpinLock lock_;
+};
+
+}
+
+#endif // __x86_64__
+
+#if __APPLE__
+#include <libkern/OSAtomic.h>
+
+namespace folly {
+
+class SpinLockAppleImpl {
+ public:
+ FOLLY_ALWAYS_INLINE SpinLockAppleImpl() : lock_(0) {}
+ FOLLY_ALWAYS_INLINE void lock() const {
+ OSSpinLockLock(&lock_);
+ }
+ FOLLY_ALWAYS_INLINE void unlock() const {
+ OSSpinLockUnlock(&lock_);
+ }
+ FOLLY_ALWAYS_INLINE bool trylock() const {
+ return OSSpinLockTry(&lock_);
+ }
+ private:
+ mutable OSSpinLock lock_;
+};
+
+}
+
+#endif // __APPLE__
+
+#include <pthread.h>
+#include <folly/Exception.h>
+
+#if !__ANDROID__ && !__APPLE__
+
+// Apple and Android systems don't have pthread_spinlock_t, so this
+// implementation is unavailable on those platforms.
+namespace folly {
+
+class SpinLockPthreadImpl {
+ public:
+ FOLLY_ALWAYS_INLINE SpinLockPthreadImpl() {
+ int rc = pthread_spin_init(&lock_, PTHREAD_PROCESS_PRIVATE);
+ checkPosixError(rc, "failed to initialize spinlock");
+ }
+ FOLLY_ALWAYS_INLINE ~SpinLockPthreadImpl() {
+ pthread_spin_destroy(&lock_);
+ }
+ void lock() const {
+ int rc = pthread_spin_lock(&lock_);
+ checkPosixError(rc, "error locking spinlock");
+ }
+ FOLLY_ALWAYS_INLINE void unlock() const {
+ int rc = pthread_spin_unlock(&lock_);
+ checkPosixError(rc, "error unlocking spinlock");
+ }
+ FOLLY_ALWAYS_INLINE bool trylock() const {
+ int rc = pthread_spin_trylock(&lock_);
+ if (rc == 0) {
+ return true;
+ } else if (rc == EBUSY) {
+ return false;
+ }
+ throwSystemErrorExplicit(rc, "spinlock trylock error");
+ }
+ private:
+ mutable pthread_spinlock_t lock_;
+};
+
+}
+
+#endif // !__ANDROID__ && !__APPLE__
+
+namespace folly {
+
+class SpinLockPthreadMutexImpl {
+ public:
+ FOLLY_ALWAYS_INLINE SpinLockPthreadMutexImpl() {
+ int rc = pthread_mutex_init(&lock_, nullptr);
+ checkPosixError(rc, "failed to initialize mutex");
+ }
+ FOLLY_ALWAYS_INLINE ~SpinLockPthreadMutexImpl() {
+ pthread_mutex_destroy(&lock_);
+ }
+ void lock() const {
+ int rc = pthread_mutex_lock(&lock_);
+ checkPosixError(rc, "error locking mutex");
+ }
+ FOLLY_ALWAYS_INLINE void unlock() const {
+ int rc = pthread_mutex_unlock(&lock_);
+ checkPosixError(rc, "error unlocking mutex");
+ }
+ FOLLY_ALWAYS_INLINE bool trylock() const {
+ int rc = pthread_mutex_trylock(&lock_);
+ if (rc == 0) {
+ return true;
+ } else if (rc == EBUSY) {
+ return false;
+ }
+ throwSystemErrorExplicit(rc, "mutex trylock error");
+ }
+ private:
+ mutable pthread_mutex_t lock_;
+};
+
+}
#include <gtest/gtest.h>
#include <thread>
-using folly::SpinLock;
-using folly::SpinLockGuard;
+using folly::SpinLockGuardImpl;
namespace {
+template <typename LOCK>
struct LockedVal {
int ar[1024];
- SpinLock lock;
+ LOCK lock;
LockedVal() {
memset(ar, 0, sizeof ar);
}
};
-LockedVal v;
-void splock_test() {
+template <typename LOCK>
+void spinlockTestThread(LockedVal<LOCK>* v) {
const int max = 1000;
unsigned int seed = (uintptr_t)pthread_self();
for (int i = 0; i < max; i++) {
asm("pause");
- SpinLockGuard g(v.lock);
+ SpinLockGuardImpl<LOCK> g(v->lock);
- int first = v.ar[0];
- for (size_t i = 1; i < sizeof v.ar / sizeof i; ++i) {
- EXPECT_EQ(first, v.ar[i]);
+ int first = v->ar[0];
+    for (size_t i = 1; i < sizeof v->ar / sizeof v->ar[0]; ++i) {
+ EXPECT_EQ(first, v->ar[i]);
}
int byte = rand_r(&seed);
- memset(v.ar, char(byte), sizeof v.ar);
+ memset(v->ar, char(byte), sizeof v->ar);
}
}
+template <typename LOCK>
struct TryLockState {
- SpinLock lock1;
- SpinLock lock2;
+ LOCK lock1;
+ LOCK lock2;
bool locked{false};
uint64_t obtained{0};
uint64_t failed{0};
};
-TryLockState tryState;
-void trylock_test() {
- const int max = 1000;
+template <typename LOCK>
+void trylockTestThread(TryLockState<LOCK>* state, uint64_t count) {
while (true) {
asm("pause");
- SpinLockGuard g(tryState.lock1);
- if (tryState.obtained >= max) {
+ SpinLockGuardImpl<LOCK> g(state->lock1);
+ if (state->obtained >= count) {
break;
}
- bool ret = tryState.lock2.trylock();
- EXPECT_NE(tryState.locked, ret);
+ bool ret = state->lock2.trylock();
+ EXPECT_NE(state->locked, ret);
if (ret) {
// We got lock2.
- ++tryState.obtained;
- tryState.locked = true;
-
- // Release lock1 and let other threads try to obtain lock2
- tryState.lock1.unlock();
- asm("pause");
- tryState.lock1.lock();
-
- tryState.locked = false;
- tryState.lock2.unlock();
+ ++state->obtained;
+ state->locked = true;
+
+ // Release lock1 and wait until at least one other thread fails to
+ // obtain the lock2 before continuing.
+ auto oldFailed = state->failed;
+ while (state->failed == oldFailed && state->obtained < count) {
+ state->lock1.unlock();
+ asm("pause");
+ state->lock1.lock();
+ }
+
+ state->locked = false;
+ state->lock2.unlock();
} else {
- ++tryState.failed;
+ ++state->failed;
}
}
}
-} // unnamed namespace
-
-TEST(SpinLock, Correctness) {
+template <typename LOCK>
+void correctnessTest() {
int nthrs = sysconf(_SC_NPROCESSORS_ONLN) * 2;
std::vector<std::thread> threads;
+ LockedVal<LOCK> v;
for (int i = 0; i < nthrs; ++i) {
- threads.push_back(std::thread(splock_test));
+ threads.push_back(std::thread(spinlockTestThread<LOCK>, &v));
}
for (auto& t : threads) {
t.join();
}
}
-TEST(SpinLock, TryLock) {
- int nthrs = sysconf(_SC_NPROCESSORS_ONLN) * 2;
+template <typename LOCK>
+void trylockTest() {
+ int nthrs = sysconf(_SC_NPROCESSORS_ONLN) + 4;
std::vector<std::thread> threads;
+ TryLockState<LOCK> state;
+  uint64_t count = 100;
for (int i = 0; i < nthrs; ++i) {
- threads.push_back(std::thread(trylock_test));
+ threads.push_back(std::thread(trylockTestThread<LOCK>, &state, count));
}
for (auto& t : threads) {
t.join();
}
- EXPECT_EQ(1000, tryState.obtained);
- EXPECT_GT(tryState.failed, 0);
- LOG(INFO) << "failed count: " << tryState.failed;
+ EXPECT_EQ(count, state.obtained);
+ // Each time the code obtains lock2 it waits for another thread to fail
+ // to acquire it. The only time this might not happen is on the very last
+ // loop when no other threads are left.
+ EXPECT_GE(state.failed + 1, state.obtained);
+}
+
+} // unnamed namespace
+
+#if __x86_64__
+TEST(SpinLock, MslCorrectness) {
+ correctnessTest<folly::SpinLockMslImpl>();
+}
+TEST(SpinLock, MslTryLock) {
+ trylockTest<folly::SpinLockMslImpl>();
+}
+#endif
+
+#if __APPLE__
+TEST(SpinLock, AppleCorrectness) {
+ correctnessTest<folly::SpinLockAppleImpl>();
+}
+TEST(SpinLock, AppleTryLock) {
+ trylockTest<folly::SpinLockAppleImpl>();
+}
+#endif
+
+#if !__ANDROID__ && !__APPLE__
+TEST(SpinLock, PthreadCorrectness) {
+ correctnessTest<folly::SpinLockPthreadImpl>();
+}
+TEST(SpinLock, PthreadTryLock) {
+ trylockTest<folly::SpinLockPthreadImpl>();
+}
+#endif
+
+TEST(SpinLock, MutexCorrectness) {
+ correctnessTest<folly::SpinLockPthreadMutexImpl>();
+}
+TEST(SpinLock, MutexTryLock) {
+ trylockTest<folly::SpinLockPthreadMutexImpl>();
}
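
As a design note on the per-platform TEST blocks above: gtest's typed tests could generate the same implementation-by-test matrix from a single type list. A sketch of that alternative (not what this diff does), using the TYPED_TEST_CASE macro current in gtest at the time and the correctnessTest/trylockTest helpers defined above:

template <typename LOCK>
class SpinLockTypedTest : public ::testing::Test {};

// The type list needs the same platform guards as detail/SpinLockImpl.h.
typedef ::testing::Types<
#if __x86_64__
    folly::SpinLockMslImpl,
#endif
#if __APPLE__
    folly::SpinLockAppleImpl,
#endif
#if !__ANDROID__ && !__APPLE__
    folly::SpinLockPthreadImpl,
#endif
    folly::SpinLockPthreadMutexImpl> LockTypes;
TYPED_TEST_CASE(SpinLockTypedTest, LockTypes);

TYPED_TEST(SpinLockTypedTest, Correctness) {
  correctnessTest<TypeParam>();
}
TYPED_TEST(SpinLockTypedTest, TryLock) {
  trylockTest<TypeParam>();
}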