2 * Copyright 2017 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
23 #include <glog/logging.h>
25 #include <folly/Bits.h>
26 #include <folly/Likely.h>
27 #include <folly/detail/Futex.h>
28 #include <folly/portability/SysTime.h>
29 #include <folly/portability/Unistd.h>
35 * Event count: a condition variable for lock free algorithms.
37 * See http://www.1024cores.net/home/lock-free-algorithms/eventcounts for
40 * Event counts allow you to convert a non-blocking lock-free / wait-free
41 * algorithm into a blocking one, by isolating the blocking logic. You call
42 * prepareWait() before checking your condition and then either cancelWait()
43 * or wait() depending on whether the condition was true. When another
44 * thread makes the condition true, it must call notify() / notifyAll() just
45 * like a regular condition variable.
47 * If "<" denotes the happens-before relationship, consider 2 threads (T1 and
 * T2) and 3 events:
49 * - E1: T1 returns from prepareWait
 * - E2: T1 calls wait
51 * (obviously E1 < E2, intra-thread)
52 * - E3: T2 calls notifyAll
54 * If E1 < E3, then E2's wait will complete (and T1 will either wake up,
55 * or not block at all)
57 * This means that you can use an EventCount in the following manner:
60 * if (!condition()) { // handle fast path first
 * for (;;) {
62 * auto key = eventCount.prepareWait();
 * if (condition()) {
64 * eventCount.cancelWait();
 * break;
 * } else {
67 * eventCount.wait(key);
 * }
 * }
 * }
72 * (This pattern is encapsulated in await())
75 * make_condition_true();
76 * eventCount.notifyAll();
78 * Note that, just like with regular condition variables, the waiter needs to
79 * be tolerant of spurious wakeups and needs to recheck the condition after
80 * being woken up. Also, as there is no mutual exclusion implied, "checking"
81 * the condition likely means attempting an operation on an underlying
82 * data structure (push into a lock-free queue, etc) and returning true on
83 * success and false on failure.
// Initial state: epoch 0, zero waiters (both halves of val_ are zero).
87 EventCount() noexcept : val_(0) { }
// Opaque ticket returned by prepareWait() and consumed by wait();
// wraps the epoch value observed when the waiter registered.
90 friend class EventCount;
91 explicit Key(uint32_t e) noexcept : epoch_(e) { }
// Wake one waiter / all waiters. Both advance the epoch via doNotify().
95 void notify() noexcept;
96 void notifyAll() noexcept;
// Register the calling thread as a waiter and capture the current epoch.
97 Key prepareWait() noexcept;
// Deregister without blocking (call when the condition turned out true).
98 void cancelWait() noexcept;
// Block until the epoch advances past key's epoch; spurious wakeups possible.
99 void wait(Key key) noexcept;
102 * Wait for condition() to become true. Will clean up appropriately if
103 * condition() throws, and then rethrow.
105 template <class Condition>
106 void await(Condition condition);
// Shared implementation behind notify()/notifyAll(); n is presumably the
// maximum number of waiters passed on to futexWake — confirm against the
// (elided) call site.
109 void doNotify(int n) noexcept;
// Non-copyable / non-movable: waiters futex-wait on the address of val_,
// so this object must not relocate while in use.
110 EventCount(const EventCount&) = delete;
111 EventCount(EventCount&&) = delete;
112 EventCount& operator=(const EventCount&) = delete;
113 EventCount& operator=(EventCount&&) = delete;
115 // This requires 64-bit
116 static_assert(sizeof(int) == 4, "bad platform");
117 static_assert(sizeof(uint32_t) == 4, "bad platform");
118 static_assert(sizeof(uint64_t) == 8, "bad platform");
119 static_assert(sizeof(std::atomic<uint64_t>) == 8, "bad platform");
120 static_assert(sizeof(detail::Futex<std::atomic>) == 4, "bad platform");
// Index (in 32-bit words) of the epoch half of val_. The futex syscall
// operates on a 32-bit word, and the high 32 bits of a little-endian
// uint64_t live at word index 1 (index 0 on big-endian).
122 static constexpr size_t kEpochOffset = kIsLittleEndian ? 1 : 0;
124 // val_ stores the epoch in the most significant 32 bits and the
125 // waiter count in the least significant 32 bits.
126 std::atomic<uint64_t> val_;
// +1 / -1 applied to the waiter count (low 32 bits). kSubWaiter is
// uint64_t(-1), i.e. an all-ones value whose wrapping add decrements
// the low half.
128 static constexpr uint64_t kAddWaiter = uint64_t(1);
129 static constexpr uint64_t kSubWaiter = uint64_t(-1);
130 static constexpr size_t kEpochShift = 32;
// +1 applied to the epoch (high 32 bits); kWaiterMask selects the
// waiter-count bits.
131 static constexpr uint64_t kAddEpoch = uint64_t(1) << kEpochShift;
132 static constexpr uint64_t kWaiterMask = kAddEpoch - 1;
// Wake a single waiter. NOTE(review): body elided from this excerpt;
// presumably forwards to doNotify(1) — confirm against the full file.
135 inline void EventCount::notify() noexcept {
// Wake every waiter. NOTE(review): body elided from this excerpt;
// presumably forwards to doNotify() with a "wake all" count — confirm.
139 inline void EventCount::notifyAll() noexcept {
// Common notify path: advance the epoch, then wake sleeping waiters only
// if any are registered, so the uncontended case makes no syscall.
143 inline void EventCount::doNotify(int n) noexcept {
// acq_rel: the epoch bump must be ordered against the waiter's
// fetch_add in prepareWait()/wait() on the same val_ word.
144 uint64_t prev = val_.fetch_add(kAddEpoch, std::memory_order_acq_rel);
// Waiter count (low 32 bits) nonzero => someone may be blocked in futexWait.
145 if (UNLIKELY(prev & kWaiterMask)) {
// The futex word is the epoch half of val_ (see kEpochOffset).
146 (reinterpret_cast<detail::Futex<std::atomic>*>(&val_) + kEpochOffset)
// NOTE(review): the ->futexWake(n) continuation and closing braces are
// elided from this excerpt.
// Register as a waiter and snapshot the epoch in one atomic RMW: any
// notify that happens-after this point bumps the epoch past the returned
// key, so the subsequent wait(key) cannot miss it.
151 inline EventCount::Key EventCount::prepareWait() noexcept {
152 uint64_t prev = val_.fetch_add(kAddWaiter, std::memory_order_acq_rel);
// Key carries only the epoch (high 32 bits) observed at registration.
153 return Key(prev >> kEpochShift);
// Undo prepareWait() without blocking (the condition became true).
156 inline void EventCount::cancelWait() noexcept {
157 // memory_order_relaxed would suffice for correctness, but the faster
158 // #waiters gets to 0, the less likely it is that we'll do spurious wakeups
159 // (and thus system calls).
// kSubWaiter is uint64_t(-1): wrapping add decrements the waiter count.
160 uint64_t prev = val_.fetch_add(kSubWaiter, std::memory_order_seq_cst);
// At least our own registration must have been present.
161 DCHECK_NE((prev & kWaiterMask), 0);
// Sleep until the epoch advances past the one captured by prepareWait().
164 inline void EventCount::wait(Key key) noexcept {
// futexWait returns immediately if the epoch word no longer equals
// key.epoch_, so a notify landing between prepareWait() and here is
// never lost; the loop re-checks because futexWait can wake spuriously.
165 while ((val_.load(std::memory_order_acquire) >> kEpochShift) == key.epoch_) {
166 (reinterpret_cast<detail::Futex<std::atomic>*>(&val_) + kEpochOffset)
167 ->futexWait(key.epoch_);
// NOTE(review): loop-closing brace elided from this excerpt.
169 // memory_order_relaxed would suffice for correctness, but the faster
170 // #waiters gets to 0, the less likely it is that we'll do spurious wakeups
171 // (and thus system calls)
// Deregister as a waiter now that the epoch has moved on.
172 uint64_t prev = val_.fetch_add(kSubWaiter, std::memory_order_seq_cst);
173 DCHECK_NE((prev & kWaiterMask), 0);
// Block until condition() returns true, using the prepareWait / cancelWait /
// wait protocol documented at the top of this file. Rethrows anything
// condition() throws after cleaning up the waiter registration.
176 template <class Condition>
177 void EventCount::await(Condition condition) {
182 // condition() is the only thing that may throw, everything else is
183 // noexcept, so we can hoist the try/catch block outside of the loop
// NOTE(review): the fast-path check, try/catch, and wait loop continue
// past this excerpt; only the first prepareWait() call is visible.
186 auto key = prepareWait();