/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include <folly/SmallLocks.h>
19 #include <folly/Random.h>
24 #include <condition_variable>
32 #include <gtest/gtest.h>
34 using folly::MSLGuard;
35 using folly::MicroLock;
36 using folly::MicroSpinLock;
37 using folly::PicoSpinLock;
48 memset(ar, 0, sizeof ar);
// Compile time test for packed struct support (requires that both of
// these classes are POD).
struct ignore1 { MicroSpinLock msl; int16_t foo; } FOLLY_PACK_ATTR;
struct ignore2 { PicoSpinLock<uint32_t> psl; int16_t foo; } FOLLY_PACK_ATTR;
// With packing in effect there must be no padding between the lock and the
// trailing int16_t: 1 + 2 bytes for ignore1, 4 + 2 bytes for ignore2.
static_assert(sizeof(ignore1) == 3, "Size check failed");
static_assert(sizeof(ignore2) == 6, "Size check failed");
static_assert(sizeof(MicroSpinLock) == 1, "Size check failed");
  // Interior of the spin-lock stress function (its signature and the MSLGuard
  // acquisition are elided from this chunk). Each iteration verifies that the
  // shared array is uniform, then repaints it with a fresh random byte.
  auto rng = folly::ThreadLocalPRNG();
  for (int i = 0; i < max; i++) {
    // Every slot must equal the value the last writer stored; a mismatch
    // means mutual exclusion was violated.
    // NOTE(review): the bound divides by sizeof i (i.e. sizeof(size_t)), not
    // by sizeof v.ar[0] — if the element type is narrower than size_t this
    // checks only part of the array. Confirm against v.ar's declaration.
    for (size_t i = 1; i < sizeof v.ar / sizeof i; ++i) {
      EXPECT_EQ(first, v.ar[i]);
    }

    // Repaint the whole array with one random byte for the next checker.
    int byte = folly::Random::rand32(rng);
    memset(v.ar, char(byte), sizeof v.ar);
81 template<class T> struct PslTest {
84 PslTest() { lock.init(); }
87 T ourVal = rand() % (T(1) << (sizeof(T) * 8 - 1));
88 for (int i = 0; i < 10000; ++i) {
89 std::lock_guard<PicoSpinLock<T>> guard(lock);
91 for (int n = 0; n < 10; ++n) {
92 folly::asm_volatile_pause();
93 EXPECT_EQ(lock.getData(), ourVal);
  // Interior of the per-type driver (its template signature and the joins'
  // loop body are elided from this chunk): 17 threads all hammer the same
  // PslTest instance concurrently.
  const int nthrs = 17;
  std::vector<std::thread> threads;
  for (int i = 0; i < nthrs; ++i) {
    threads.push_back(std::thread(&PslTest<T>::doTest, &testObj));
  }
  for (auto& t : threads) {
    // Fragment of the RegClobber helper's member function (declarations are
    // elided from this chunk): with the spin lock held, a re-entrant
    // try_lock on the same lock must fail.
    std::lock_guard<MicroSpinLock> g(lock_);
    // This bug depends on gcc register allocation and is very sensitive. We
    // have to use DCHECK instead of EXPECT_*.
    DCHECK(!lock_.try_lock());
// Contention test for MicroSpinLock (the joins' loop body and the closing
// brace are elided from this chunk).
TEST(SmallLocks, SpinLockCorrectness) {
  EXPECT_EQ(sizeof(MicroSpinLock), 1);

  // Oversubscribe: twice as many threads as online CPUs, all running the
  // spin-lock stress function against the same shared value.
  int nthrs = sysconf(_SC_NPROCESSORS_ONLN) * 2;
  std::vector<std::thread> threads;
  for (int i = 0; i < nthrs; ++i) {
    threads.push_back(std::thread(splock_test));
  }
  for (auto& t : threads) {
// Exercise PicoSpinLock across every supported integer width, signed and
// unsigned (the TEST's closing brace is elided from this chunk).
TEST(SmallLocks, PicoSpinCorrectness) {
  doPslTest<int16_t>();
  doPslTest<uint16_t>();
  doPslTest<int32_t>();
  doPslTest<uint32_t>();
  doPslTest<int64_t>();
  doPslTest<uint64_t>();
// Verify PicoSpinLock preserves negative payloads (several lines — the lock
// variable's declaration/init, a setData(-8) call, and scope braces — are
// elided from this chunk).
TEST(SmallLocks, PicoSpinSigned) {
  // Second template argument selects the lock's bit position — presumably
  // bit 0 here; confirm against PicoSpinLock's declaration.
  typedef PicoSpinLock<int16_t,0> Lock;
  // -4 is presumably the value passed to init() in the elided lines.
  EXPECT_EQ(val.getData(), -4);
    std::lock_guard<Lock> guard(val);
    EXPECT_EQ(val.getData(), -4);
    // Between these checks the elided code updates the payload to -8.
    EXPECT_EQ(val.getData(), -8);
  EXPECT_EQ(val.getData(), -8);
// Regression test for the register-clobber bug checked by the DCHECK helper
// above (the test body is elided from this chunk).
TEST(SmallLocks, RegClobber) {

// MicroLock must pack into a single byte.
static_assert(sizeof(MicroLock) == 1, "Size check failed");
// Minimal one-shot start barrier built on mutex + condition_variable; the
// member functions' signatures and bodies are partially elided from this
// chunk, so only the visible lines are annotated.
struct SimpleBarrier {
  SimpleBarrier() : lock_(), cv_(), ready_(false) {}

    // Presumably the waiting side: take the mutex before checking ready_.
    std::unique_lock<std::mutex> lockHeld(lock_);

    // Presumably the releasing side: take the mutex before flipping ready_.
    std::unique_lock<std::mutex> lockHeld(lock_);

  std::condition_variable cv_;
// Stress MicroLock's four independent lock slots under heavy contention and
// simultaneously verify it never touches memory outside its own byte.
// (Several lines — the host struct `x` with fields b/d and the embedded
// MicroLock `alock`, barrier waits, joins, and scope braces — are elided
// from this chunk.)
static void runMicroLockTest() {
  // One counter per lock slot. `volatile` only discourages compiler caching;
  // mutual exclusion for the increments comes from the MicroLock slot locks.
  volatile uint64_t counters[4] = {0, 0, 0, 0};
  std::vector<std::thread> threads;
  static const unsigned nrThreads = 20;
  static const unsigned iterPerThread = 10000;
  SimpleBarrier startBarrier;

  // Each thread cycles through the 4 slots evenly, so the per-slot totals
  // below only come out exact when iterations split evenly by 4.
  assert(iterPerThread % 4 == 0);

  // Embed the lock in a larger structure to ensure that we do not
  // affect bits outside the ones MicroLock is defined to affect.

  // This thread touches other parts of the host word to show that
  // MicroLock does not interfere with memory outside of the byte
  std::thread adjacentMemoryToucher = std::thread([&] {
    for (unsigned iter = 0; iter < iterPerThread; ++iter) {

  for (unsigned i = 0; i < nrThreads; ++i) {
    threads.emplace_back([&] {
      for (unsigned iter = 0; iter < iterPerThread; ++iter) {
        unsigned slotNo = iter % 4;
        x.alock.lock(slotNo);
        counters[slotNo] += 1;
        // The occasional sleep makes it more likely that we'll
        // exercise the futex-wait path inside MicroLock.
        if (iter % 1000 == 0) {
          // 10 microseconds (tv_nsec = 10000).
          struct timespec ts = {0, 10000};
          (void)nanosleep(&ts, nullptr);
        x.alock.unlock(slotNo);

  for (auto it = threads.begin(); it != threads.end(); ++it) {
  adjacentMemoryToucher.join();

  // The toucher incremented adjacent bytes b and d on half its iterations
  // each — presumably; confirm against the elided toucher body. Wrap-around
  // is intended, hence the uint8_t casts.
  EXPECT_EQ(x.b, (uint8_t)(origB + iterPerThread / 2));
  EXPECT_EQ(x.d, (uint8_t)(origD + iterPerThread / 2));
  // Every thread bumped each slot's counter iterPerThread/4 times.
  for (unsigned i = 0; i < 4; ++i) {
    EXPECT_EQ(counters[i], ((uint64_t)nrThreads * iterPerThread) / 4);
283 TEST(SmallLocks, MicroLock) { runMicroLockTest(); }
// try_lock on a fresh MicroLock must succeed once, then fail while the lock
// is held (the lock's declaration/init, the unlock, and the closing brace
// are elided from this chunk).
TEST(SmallLocks, MicroLockTryLock) {
  EXPECT_TRUE(lock.try_lock());
  EXPECT_FALSE(lock.try_lock());