2 * Copyright 2017 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include <folly/SmallLocks.h>
20 #include <condition_variable>
27 #include <folly/Random.h>
28 #include <folly/portability/Asm.h>
29 #include <folly/portability/GTest.h>
30 #include <folly/portability/PThread.h>
31 #include <folly/portability/Unistd.h>
33 using folly::MSLGuard;
34 using folly::MicroLock;
35 using folly::MicroSpinLock;
38 #ifdef FOLLY_PICO_SPIN_LOCK_H_
39 using folly::PicoSpinLock;
// Zero the entire shared array before the stress threads start; `sizeof ar`
// is the array's full byte size since `ar` is a true array in this scope.
// (Presumably the owning struct's constructor — surrounding lines elided.)
memset(ar, 0, sizeof ar);
// Compile time test for packed struct support (requires that both of
// these classes are POD).
// A 1-byte MicroSpinLock packed tightly against an int16_t must occupy
// exactly 3 bytes, i.e. FOLLY_PACK_ATTR removes all padding around the lock.
struct ignore1 { MicroSpinLock msl; int16_t foo; } FOLLY_PACK_ATTR;
static_assert(sizeof(ignore1) == 3, "Size check failed");
static_assert(sizeof(MicroSpinLock) == 1, "Size check failed");
#ifdef FOLLY_PICO_SPIN_LOCK_H_
// Likewise a PicoSpinLock<uint32_t> (4 bytes, by the 6 - 2 arithmetic below)
// packed against an int16_t must give a 6-byte struct.
struct ignore2 { PicoSpinLock<uint32_t> psl; int16_t foo; } FOLLY_PACK_ATTR;
static_assert(sizeof(ignore2) == 6, "Size check failed");
// Spin-lock stress-thread body (the enclosing function header, the lock
// acquisition, and the `first = v.ar[0]` read are elided in this view).
auto rng = folly::ThreadLocalPRNG();
for (int i = 0; i < max; i++) {
  folly::asm_volatile_pause();
  // While (presumably) holding the lock, every element of v.ar must equal
  // the first one; a mismatch means another thread wrote concurrently.
  // NOTE(review): the bound divides by `sizeof j` (a size_t), not by the
  // element size of v.ar — if the elements are narrower than size_t this
  // scans only part of the array; confirm against v.ar's declaration.
  for (size_t j = 1; j < sizeof v.ar / sizeof j; ++j) {
    EXPECT_EQ(first, v.ar[j]);
  // Repaint the whole array with a single random byte so that concurrent
  // readers can detect torn / unsynchronized writes on the next pass.
  int byte = folly::Random::rand32(rng);
  memset(v.ar, char(byte), sizeof v.ar);
#ifdef FOLLY_PICO_SPIN_LOCK_H_
// Per-type PicoSpinLock exerciser: several threads share one lock object;
// each repeatedly stores its own value under the lock, then spins briefly
// verifying the value is not clobbered. (Members/braces partially elided.)
template<class T> struct PslTest {
  PslTest() { lock.init(); }
  // doTest() body (header elided; see the &PslTest<T>::doTest thread spawn):
  using UT = typename std::make_unsigned<T>::type;
  // Pick a per-thread value that fits in sizeof(T)*8 - 1 bits — presumably
  // because the remaining bit of the integer is the lock bit; TODO confirm
  // against PicoSpinLock's declaration.
  T ourVal = rand() % T(UT(1) << (sizeof(UT) * 8 - 1));
  for (int i = 0; i < 100; ++i) {
    std::lock_guard<PicoSpinLock<T>> guard(lock);
    // (the setData(ourVal) store is elided in this view)
    for (int n = 0; n < 10; ++n) {
      folly::asm_volatile_pause();
      // While we hold the lock, no other thread may change the payload.
      EXPECT_EQ(lock.getData(), ourVal);
  // Launch 17 threads all hammering one shared PslTest<T> instance.
  // (testObj declaration elided in this view.)
  const int nthrs = 17;
  std::vector<std::thread> threads;
  for (int i = 0; i < nthrs; ++i) {
    threads.push_back(std::thread(&PslTest<T>::doTest, &testObj));
  // Join every worker before the shared test object goes out of scope.
  for (auto& t : threads) {
  // Hold the spin lock, then verify that try_lock from the *same* thread
  // fails — MicroSpinLock is not recursive.
  std::lock_guard<MicroSpinLock> g(lock_);
  // This bug depends on gcc register allocation and is very sensitive. We
  // have to use DCHECK instead of EXPECT_*.
  DCHECK(!lock_.try_lock());
TEST(SmallLocks, SpinLockCorrectness) {
  // The lock must stay a single byte so it can be packed into host words.
  EXPECT_EQ(sizeof(MicroSpinLock), 1);

  // Oversubscribe: twice as many stress threads as online CPUs.
  // NOTE(review): sysconf can return -1 on failure, which would make nthrs
  // negative and silently skip the loop — confirm that is acceptable.
  int nthrs = sysconf(_SC_NPROCESSORS_ONLN) * 2;
  std::vector<std::thread> threads;
  for (int i = 0; i < nthrs; ++i) {
    threads.push_back(std::thread(splock_test));
  // Join all workers before the test (and the shared state) tears down.
  for (auto& t : threads) {
#ifdef FOLLY_PICO_SPIN_LOCK_H_
// Run the multi-threaded PicoSpinLock exercise for each supported
// signed and unsigned integer width (16/32/64-bit).
TEST(SmallLocks, PicoSpinCorrectness) {
  doPslTest<int16_t>();
  doPslTest<uint16_t>();
  doPslTest<int32_t>();
  doPslTest<uint32_t>();
  doPslTest<int64_t>();
  doPslTest<uint64_t>();
// PicoSpinLock must round-trip *negative* payloads correctly — the data
// presumably shares its integer storage with the lock bit, so sign handling
// is the interesting case here.
TEST(SmallLocks, PicoSpinSigned) {
  // Second template argument is presumably the lock-bit position (0) —
  // TODO confirm against PicoSpinLock's declaration.
  typedef PicoSpinLock<int16_t,0> Lock;
  // (val declaration / init(-4) elided in this view)
  EXPECT_EQ(val.getData(), -4);
    // Inside a scope that holds the lock:
    std::lock_guard<Lock> guard(val);
    EXPECT_EQ(val.getData(), -4);
    // (setData(-8) elided) — the new value must be visible under the lock...
    EXPECT_EQ(val.getData(), -8);
  // ...and must still be intact after the guard releases the lock.
  EXPECT_EQ(val.getData(), -8);
// Regression test for the register-allocation-sensitive try_lock bug
// exercised above (test body elided in this view).
TEST(SmallLocks, RegClobber) {

// MicroLock is normally a single byte; gcc's AddressSanitizer build pads
// it to 4 bytes. (The #else/#endif separating these two asserts is elided
// in this view — the branches are mutually exclusive.)
#if defined(__SANITIZE_ADDRESS__) && !defined(__clang__) && \
    (defined(__GNUC__) || defined(__GNUG__))
static_assert(sizeof(MicroLock) == 4, "Size check failed");
static_assert(sizeof(MicroLock) == 1, "Size check failed");
// One-shot start barrier built on a mutex + condition variable: workers
// block until ready_ becomes true and they are notified. (Method headers,
// the wait/notify calls, and the ready_/mutex members are partially elided.)
struct SimpleBarrier {
  SimpleBarrier() : lock_(), cv_(), ready_(false) {}
    // wait() path: the mutex must be held before checking/sleeping on cv_.
    std::unique_lock<std::mutex> lockHeld(lock_);
    // release path: the mutex must be held before mutating state/notifying.
    std::unique_lock<std::mutex> lockHeld(lock_);
  std::condition_variable cv_;
TEST(SmallLocks, MicroLock) {
  // One counter per lock slot — the lock()/unlock() calls below take a slot
  // number in 0..3, so a MicroLock multiplexes four independent slots.
  volatile uint64_t counters[4] = {0, 0, 0, 0};
  std::vector<std::thread> threads;
  static const unsigned nrThreads = 20;
  static const unsigned iterPerThread = 10000;
  SimpleBarrier startBarrier;

  // Each slot must be hit the same number of times per thread (iter % 4).
  assert(iterPerThread % 4 == 0);

  // Embed the lock in a larger structure to ensure that we do not
  // affect bits outside the ones MicroLock is defined to affect.
  // (struct declaration and the alock member are partially elided.)
    std::atomic<uint8_t> b;
    std::atomic<uint8_t> d;

  // This thread touches other parts of the host word to show that
  // MicroLock does not interfere with memory outside of the byte
  // it owns. (It presumably increments x.b and x.d — see the origB/origD
  // expectations below; confirm against the elided loop body.)
  std::thread adjacentMemoryToucher = std::thread([&] {
    for (unsigned iter = 0; iter < iterPerThread; ++iter) {

  for (unsigned i = 0; i < nrThreads; ++i) {
    threads.emplace_back([&] {
      for (unsigned iter = 0; iter < iterPerThread; ++iter) {
        unsigned slotNo = iter % 4;
        x.alock.lock(slotNo);
        // Safe unsynchronized increment: the slot lock serializes writers.
        counters[slotNo] += 1;
        // The occasional sleep makes it more likely that we'll
        // exercise the futex-wait path inside MicroLock.
        if (iter % 1000 == 0) {
          struct timespec ts = {0, 10000};
          (void)nanosleep(&ts, nullptr);
        x.alock.unlock(slotNo);

  for (auto it = threads.begin(); it != threads.end(); ++it) {
  adjacentMemoryToucher.join();

  // Adjacent bytes must have advanced exactly as the toucher wrote them —
  // MicroLock may not have clobbered them. uint8_t cast allows wraparound.
  EXPECT_EQ(x.b, (uint8_t)(origB + iterPerThread / 2));
  EXPECT_EQ(x.d, (uint8_t)(origD + iterPerThread / 2));
  // Every slot's counter saw every increment: locking was mutually exclusive.
  for (unsigned i = 0; i < 4; ++i) {
    EXPECT_EQ(counters[i], ((uint64_t)nrThreads * iterPerThread) / 4);
// try_lock must succeed exactly once on a free lock, then fail while the
// lock is already held by this thread.
TEST(SmallLocks, MicroLockTryLock) {
  // (lock declaration / init elided in this view)
  EXPECT_TRUE(lock.try_lock());
  EXPECT_FALSE(lock.try_lock());