/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <linux/futex.h>
#include <sys/syscall.h>

#include <atomic>
#include <climits>
#include <cstddef>
#include <cstdint>

#include <glog/logging.h>

#include <folly/Bits.h>
#include <folly/Likely.h>
#include <folly/portability/SysTime.h>
#include <folly/portability/Unistd.h>
// Thin wrapper around the Linux futex(2) system call; glibc exposes no
// declaration for it, so we go through syscall(2) directly.
// NOTE(review): call sites below refer to detail::futex, so this wrapper is
// expected to live inside a detail namespace — confirm against the full file.
inline int futex(int* uaddr, int op, int val, const timespec* timeout,
                 int* uaddr2, int val3) noexcept {
  return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
/**
 * Event count: a condition variable for lock free algorithms.
 *
 * See http://www.1024cores.net/home/lock-free-algorithms/eventcounts for
 * details.
 *
 * Event counts allow you to convert a non-blocking lock-free / wait-free
 * algorithm into a blocking one, by isolating the blocking logic. You call
 * prepareWait() before checking your condition and then either cancelWait()
 * or wait() depending on whether the condition was true. When another
 * thread makes the condition true, it must call notify() / notifyAll() just
 * like a regular condition variable.
 *
 * If "<" denotes the happens-before relationship, consider 2 threads (T1 and
 * T2) and 3 events:
 * - E1: T1 returns from prepareWait
 * - E2: T1 calls wait
 *   (obviously E1 < E2, intra-thread)
 * - E3: T2 calls notifyAll
 *
 * If E1 < E3, then E2's wait will complete (and T1 will either wake up,
 * or not block at all)
 *
 * This means that you can use an EventCount in the following manner:
 *
 * Waiter:
 *   if (!condition()) {  // handle fast path first
 *     for (;;) {
 *       auto key = eventCount.prepareWait();
 *       if (condition()) {
 *         eventCount.cancelWait();
 *         break;
 *       }
 *       eventCount.wait(key);
 *     }
 *   }
 *
 * (This pattern is encapsulated in await())
 *
 * Poster:
 *   make_condition_true();
 *   eventCount.notifyAll();
 *
 * Note that, just like with regular condition variables, the waiter needs to
 * be tolerant of spurious wakeups and needs to recheck the condition after
 * being woken up. Also, as there is no mutual exclusion implied, "checking"
 * the condition likely means attempting an operation on an underlying
 * data structure (push into a lock-free queue, etc) and returning true on
 * success and false on failure.
 */
96 EventCount() noexcept : val_(0) { }
99 friend class EventCount;
100 explicit Key(uint32_t e) noexcept : epoch_(e) { }
104 void notify() noexcept;
105 void notifyAll() noexcept;
106 Key prepareWait() noexcept;
107 void cancelWait() noexcept;
108 void wait(Key key) noexcept;
111 * Wait for condition() to become true. Will clean up appropriately if
112 * condition() throws, and then rethrow.
114 template <class Condition>
115 void await(Condition condition);
118 void doNotify(int n) noexcept;
119 EventCount(const EventCount&) = delete;
120 EventCount(EventCount&&) = delete;
121 EventCount& operator=(const EventCount&) = delete;
122 EventCount& operator=(EventCount&&) = delete;
124 // This requires 64-bit
125 static_assert(sizeof(int) == 4, "bad platform");
126 static_assert(sizeof(uint32_t) == 4, "bad platform");
127 static_assert(sizeof(uint64_t) == 8, "bad platform");
129 static constexpr size_t kEpochOffset = kIsLittleEndian ? 1 : 0;
131 // val_ stores the epoch in the most significant 32 bits and the
132 // waiter count in the least significant 32 bits.
133 std::atomic<uint64_t> val_;
135 static constexpr uint64_t kAddWaiter = uint64_t(1);
136 static constexpr uint64_t kSubWaiter = uint64_t(-1);
137 static constexpr size_t kEpochShift = 32;
138 static constexpr uint64_t kAddEpoch = uint64_t(1) << kEpochShift;
139 static constexpr uint64_t kWaiterMask = kAddEpoch - 1;
142 inline void EventCount::notify() noexcept {
146 inline void EventCount::notifyAll() noexcept {
150 inline void EventCount::doNotify(int n) noexcept {
151 uint64_t prev = val_.fetch_add(kAddEpoch, std::memory_order_acq_rel);
152 if (UNLIKELY(prev & kWaiterMask)) {
153 detail::futex(reinterpret_cast<int*>(&val_) + kEpochOffset,
154 FUTEX_WAKE, n, nullptr, nullptr, 0);
158 inline EventCount::Key EventCount::prepareWait() noexcept {
159 uint64_t prev = val_.fetch_add(kAddWaiter, std::memory_order_acq_rel);
160 return Key(prev >> kEpochShift);
163 inline void EventCount::cancelWait() noexcept {
164 // memory_order_relaxed would suffice for correctness, but the faster
165 // #waiters gets to 0, the less likely it is that we'll do spurious wakeups
166 // (and thus system calls).
167 uint64_t prev = val_.fetch_add(kSubWaiter, std::memory_order_seq_cst);
168 DCHECK_NE((prev & kWaiterMask), 0);
171 inline void EventCount::wait(Key key) noexcept {
172 while ((val_.load(std::memory_order_acquire) >> kEpochShift) == key.epoch_) {
173 detail::futex(reinterpret_cast<int*>(&val_) + kEpochOffset,
174 FUTEX_WAIT, key.epoch_, nullptr, nullptr, 0);
176 // memory_order_relaxed would suffice for correctness, but the faster
177 // #waiters gets to 0, the less likely it is that we'll do spurious wakeups
178 // (and thus system calls)
179 uint64_t prev = val_.fetch_add(kSubWaiter, std::memory_order_seq_cst);
180 DCHECK_NE((prev & kWaiterMask), 0);
183 template <class Condition>
184 void EventCount::await(Condition condition) {
185 if (condition()) return; // fast path
187 // condition() is the only thing that may throw, everything else is
188 // noexcept, so we can hoist the try/catch block outside of the loop
191 auto key = prepareWait();