2 * Copyright 2015 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
20 * @author Keith Adams <kma@fb.com>
21 * @author Jordan DeLong <delong.j@fb.com>
26 #include <type_traits>
27 #include <boost/noncopyable.hpp>
33 #include <glog/logging.h>
34 #include <folly/detail/Sleeper.h>
35 #include <folly/Portability.h>
40 * A really, *really* small spinlock for fine-grained locking of lots
43 * Zero initializing these is guaranteed to be as good as calling
44 * init(), since the free state is guaranteed to be all-bits zero.
 * This class should be kept a POD, so we can use it in other packed
47 * structs (gcc does not allow __attribute__((__packed__)) on structs that
48 * contain non-POD data). This means avoid adding a constructor, or
49 * making some members private, etc.
struct MicroSpinLock {
  enum { FREE = 0, LOCKED = 1 };
  // lock_ can't be std::atomic<> to preserve POD-ness.

  // Initialize this MSL. It is unnecessary to call this if you
  // zero-initialize the MicroSpinLock.
    // Plain (seq_cst) store back to the unlocked state.
    payload()->store(FREE);

    // try_lock: a single CAS attempt; succeeds only if the lock was FREE.
    return cas(FREE, LOCKED);

    // lock: retry until the CAS wins. Sleeper paces the retries
    // (presumably a spin-then-yield back-off; see folly/detail/Sleeper.h
    // -- TODO confirm its exact policy).
    detail::Sleeper sleeper;
      // Test-and-test-and-set: spin on a cheap load until the lock
      // looks FREE before retrying the more expensive CAS.
      while (payload()->load() != FREE) {
    } while (!try_lock());
    DCHECK(payload()->load() == LOCKED);

    // unlock: caller must hold the lock (CHECKed below). The release
    // store publishes all writes made inside the critical section to
    // the next thread that acquires the lock.
    CHECK(payload()->load() == LOCKED);
    payload()->store(FREE, std::memory_order_release);

  // View the raw lock byte as an atomic. NOTE(review): relies on
  // std::atomic<uint8_t> being layout-compatible with the raw byte;
  // the member stays non-atomic only to keep this type a POD (see
  // the struct-level comment above the class).
  std::atomic<uint8_t>* payload() {
    return reinterpret_cast<std::atomic<uint8_t>*>(&this->lock_);

  // CAS helper: acquire ordering on success (we now own the lock and
  // must see prior critical sections); relaxed on failure (a failed
  // attempt synchronizes with nothing).
  bool cas(uint8_t compare, uint8_t newVal) {
    return std::atomic_compare_exchange_strong_explicit(payload(), &compare, newVal,
                                                        std::memory_order_acquire,
                                                        std::memory_order_relaxed);
93 //////////////////////////////////////////////////////////////////////
96 * Array of spinlocks where each one is padded to prevent false sharing.
97 * Useful for shard-based locking implementations in environments where
98 * contention is unlikely.
101 // TODO: generate it from configure (`getconf LEVEL1_DCACHE_LINESIZE`)
102 #define FOLLY_CACHE_LINE_SIZE 64
template <class T, size_t N>
struct FOLLY_ALIGNED_MAX SpinLockArray {
  // Return the i-th shard's lock. No bounds checking.
  T& operator[](size_t i) {
    return data_[i].lock;

  const T& operator[](size_t i) const {
    return data_[i].lock;

  // Number of locks in the array (compile-time constant).
  constexpr size_t size() const { return N; }

  // One lock padded out to a full cache line so that adjacent locks
  // never share a line (prevents false sharing between shards).
  struct PaddedSpinLock {
    PaddedSpinLock() : lock() {}
    char padding[FOLLY_CACHE_LINE_SIZE - sizeof(T)];
  static_assert(sizeof(PaddedSpinLock) == FOLLY_CACHE_LINE_SIZE,
                "Invalid size of PaddedSpinLock");
  // Check if T can theoretically cross a cache line.
  static_assert(alignof(std::max_align_t) > 0 &&
                FOLLY_CACHE_LINE_SIZE % alignof(std::max_align_t) == 0 &&
                sizeof(T) <= alignof(std::max_align_t),
                "T can cross cache line boundaries");

  // Leading padding so data_ does not share a cache line with whatever
  // happens to sit immediately before this object in memory.
  char padding_[FOLLY_CACHE_LINE_SIZE];
  std::array<PaddedSpinLock, N> data_;
135 //////////////////////////////////////////////////////////////////////
137 typedef std::lock_guard<MicroSpinLock> MSLGuard;
139 //////////////////////////////////////////////////////////////////////