/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>
#include <stdexcept>
#include <type_traits>

#include <boost/noncopyable.hpp>

#include <folly/Traits.h>
#include <folly/detail/CacheLocality.h>
#include <folly/detail/Futex.h>
template<typename T, template<typename> class Atom>
struct SingleElementQueue;

template <typename T> class MPMCPipelineStageImpl;
/// MPMCQueue<T> is a high-performance bounded concurrent queue that
/// supports multiple producers, multiple consumers, and optional blocking.
/// The queue has a fixed capacity, for which all memory will be allocated
/// up front.  The bulk of the work of enqueuing and dequeuing can be
/// performed in parallel.
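///
/// A minimal usage sketch (illustration only, not part of the original
/// documentation):
///
///   folly::MPMCQueue<int> queue(1000);
///
///   // producer thread
///   queue.blockingWrite(42);
///
///   // consumer thread
///   int value;
///   queue.blockingRead(value);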
///
/// MPMCQueue is linearizable.  That means that if a call to write(A)
/// returns before a call to write(B) begins, then A will definitely end up
/// in the queue before B, and if a call to read(X) returns before a call
/// to read(Y) is started, then X will be something from earlier in the
/// queue than Y.  This also means that if a read call returns a value, you
/// can be sure that all previous elements of the queue have been assigned
/// a reader (that reader might not yet have returned, but it exists).
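///
/// For example (illustration only): if thread 1's write(1) has returned
/// before thread 2 calls write(2), then the read that returns 1 was
/// assigned an earlier queue position than the read that returns 2,
/// regardless of which reader thread happens to run first:
///
///   // thread 1            // thread 2 (starts after thread 1 returns)
///   q.blockingWrite(1);    q.blockingWrite(2);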
///
/// The underlying implementation uses a ticket dispenser for the head and
/// the tail, spreading accesses across N single-element queues to produce
/// a queue with capacity N.  The ticket dispensers use atomic increment,
/// which is more robust to contention than a CAS loop.  Each of the
/// single-element queues uses its own CAS to serialize access, with an
/// adaptive spin cutoff.  When spinning fails on a single-element queue
/// it uses futex()'s _BITSET operations to reduce unnecessary wakeups
/// even if multiple waiters are present on an individual queue (such as
/// when the MPMCQueue's capacity is smaller than the number of enqueuers
/// or dequeuers).
///
/// In benchmarks (contained in tao/queues/ConcurrentQueueTests)
/// it handles 1 to 1, 1 to N, N to 1, and N to M thread counts better
/// than any of the alternatives present in fbcode, for both small (~10)
/// and large capacities.  In these benchmarks it is also faster than
/// tbb::concurrent_bounded_queue for all configurations.  When there are
/// many more threads than cores, MPMCQueue is _much_ faster than the tbb
/// queue because it uses futex() to block and unblock waiting threads,
/// rather than spinning with sched_yield.
///
/// NOEXCEPT INTERACTION: tl;dr: if it compiles, you're fine.  Ticket-based
/// queues separate the assignment of queue positions from the actual
/// construction of the in-queue elements, which means that the T
/// constructor used during enqueue must not throw an exception.  This is
/// enforced at compile time using type traits, which requires that T be
/// adorned with accurate noexcept information.  If your type does not
/// use noexcept, you will have to wrap it in something that provides
/// the guarantee.  We provide an alternate safe implementation for types
/// that don't use noexcept but that are marked folly::IsRelocatable
/// and boost::has_nothrow_constructor, which is common for folly types.
/// In particular, if you can declare FOLLY_ASSUME_FBVECTOR_COMPATIBLE
/// then your type can be put in MPMCQueue.
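///
/// A sketch of both options (Widget and LegacyThing are hypothetical types,
/// not part of this header):
///
///   struct Widget {
///     std::string name;
///     Widget(Widget&&) noexcept = default;     // noexcept move: usable as-is
///   };
///
///   struct LegacyThing { /* no noexcept annotations */ };
///   FOLLY_ASSUME_FBVECTOR_COMPATIBLE(LegacyThing);  // declares relocatability
///
///   folly::MPMCQueue<Widget> a(64);
///   folly::MPMCQueue<LegacyThing> b(64);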
///
/// If you have a pool of N queue consumers that you want to shut down
/// after the queue has drained, one way is to enqueue N sentinel values
/// to the queue.  If the producer doesn't know how many consumers there
/// are you can enqueue one sentinel and then have each consumer requeue
/// two sentinels after it receives it (by requeuing 2 the shutdown can
/// complete in O(log P) time instead of O(P)).
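///
/// A sketch of the fan-out shutdown (Item, kSentinel, and process() are
/// hypothetical):
///
///   // the producer, once it is done, enqueues a single sentinel:
///   q.blockingWrite(kSentinel);
///
///   // each of the P consumers:
///   Item item;
///   for (;;) {
///     q.blockingRead(item);
///     if (item == kSentinel) {
///       q.blockingWrite(kSentinel);   // requeue two sentinels so that the
///       q.blockingWrite(kSentinel);   // remaining consumers shut down too
///       break;
///     }
///     process(item);
///   }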
template <typename T,
          template<typename> class Atom = std::atomic>
class MPMCQueue : boost::noncopyable {

  static_assert(std::is_nothrow_constructible<T,T&&>::value ||
                folly::IsRelocatable<T>::value,
      "T must be relocatable or have a noexcept move constructor");

  friend class detail::MPMCPipelineStageImpl<T>;

 public:
  typedef T value_type;
  explicit MPMCQueue(size_t queueCapacity)
    : capacity_(queueCapacity)
    , pushTicket_(0)
    , popTicket_(0)
    , pushSpinCutoff_(0)
    , popSpinCutoff_(0)
  {
    if (queueCapacity == 0) {
      throw std::invalid_argument(
          "MPMCQueue with explicit capacity 0 is impossible");
    }

    // would sigfpe if capacity is 0
    stride_ = computeStride(queueCapacity);
    slots_ = new detail::SingleElementQueue<T,Atom>[queueCapacity +
                                                    2 * kSlotPadding];

    // ideally this would be a static assert, but g++ doesn't allow it
    assert(alignof(MPMCQueue<T,Atom>)
           >= detail::CacheLocality::kFalseSharingRange);
    assert(static_cast<uint8_t*>(static_cast<void*>(&popTicket_))
           - static_cast<uint8_t*>(static_cast<void*>(&pushTicket_))
           >= detail::CacheLocality::kFalseSharingRange);
  }
  /// A default-constructed queue is useful because a usable (non-zero
  /// capacity) queue can be moved onto it or swapped with it
  MPMCQueue() noexcept
    : capacity_(0)
    , slots_(nullptr)
    , stride_(0)
    , pushTicket_(0)
    , popTicket_(0)
    , pushSpinCutoff_(0)
    , popSpinCutoff_(0)
  {}
  /// IMPORTANT: The move constructor is here to make it easier to perform
  /// the initialization phase; it is not safe to use when there are any
  /// concurrent accesses (this is not checked).
  MPMCQueue(MPMCQueue<T,Atom>&& rhs) noexcept
    : capacity_(rhs.capacity_)
    , slots_(rhs.slots_)
    , stride_(rhs.stride_)
    , pushTicket_(rhs.pushTicket_.load(std::memory_order_relaxed))
    , popTicket_(rhs.popTicket_.load(std::memory_order_relaxed))
    , pushSpinCutoff_(rhs.pushSpinCutoff_.load(std::memory_order_relaxed))
    , popSpinCutoff_(rhs.popSpinCutoff_.load(std::memory_order_relaxed))
  {
    // relaxed ops are okay for the previous reads, since rhs queue can't
    // be in concurrent use

    // zero out rhs so that it is left as a valid empty queue
    rhs.capacity_ = 0;
    rhs.slots_ = nullptr;
    rhs.stride_ = 0;
    rhs.pushTicket_.store(0, std::memory_order_relaxed);
    rhs.popTicket_.store(0, std::memory_order_relaxed);
    rhs.pushSpinCutoff_.store(0, std::memory_order_relaxed);
    rhs.popSpinCutoff_.store(0, std::memory_order_relaxed);
  }
  /// IMPORTANT: The move operator is here to make it easier to perform
  /// the initialization phase; it is not safe to use when there are any
  /// concurrent accesses (this is not checked).
  MPMCQueue<T,Atom> const& operator= (MPMCQueue<T,Atom>&& rhs) {
    if (this != &rhs) {
      this->~MPMCQueue();
      new (this) MPMCQueue(std::move(rhs));
    }
    return *this;
  }
  /// MPMCQueue can only be safely destroyed when there are no
  /// pending enqueuers or dequeuers (this is not checked).
  ~MPMCQueue() {
    delete[] slots_;
  }
  /// Returns the number of successful writes minus the number of successful
  /// reads.  Waiting blockingRead and blockingWrite calls are included,
  /// so this value can be negative.
  ssize_t size() const noexcept {
    // since both pushes and pops increase monotonically, we can get a
    // consistent snapshot either by bracketing a read of popTicket_ with
    // two reads of pushTicket_ that return the same value, or the other
    // way around.  We maximize our chances by alternately attempting
    // both bracketings
    uint64_t pushes = pushTicket_.load(std::memory_order_acquire); // A
    uint64_t pops = popTicket_.load(std::memory_order_acquire); // B
    while (true) {
      uint64_t nextPushes = pushTicket_.load(std::memory_order_acquire); // C
      if (pushes == nextPushes) {
        // pushTicket_ didn't change from A (or the previous C) to C,
        // so we can linearize at B (or D)
        return pushes - pops;
      }
      pushes = nextPushes;
      uint64_t nextPops = popTicket_.load(std::memory_order_acquire); // D
      if (pops == nextPops) {
        // popTicket_ didn't change from B (or the previous D), so we
        // can linearize at C
        return pushes - pops;
      }
      pops = nextPops;
    }
  }
  /// Returns true if there are no items available for dequeue
  bool isEmpty() const noexcept {
    return size() <= 0;
  }

  /// Returns true if there is currently no empty space to enqueue
  bool isFull() const noexcept {
    // careful with signed -> unsigned promotion, since size can be negative
    return size() >= static_cast<ssize_t>(capacity_);
  }

  /// Returns a guess at size() for contexts that don't need a precise
  /// value, such as stats.
  ssize_t sizeGuess() const noexcept {
    return writeCount() - readCount();
  }

  /// Returns the queue's fixed capacity
  size_t capacity() const noexcept {
    return capacity_;
  }
  /// Returns the total number of calls to blockingWrite or successful
  /// calls to write, including those blockingWrite calls that are
  /// currently blocking
  uint64_t writeCount() const noexcept {
    return pushTicket_.load(std::memory_order_acquire);
  }

  /// Returns the total number of calls to blockingRead or successful
  /// calls to read, including those blockingRead calls that are currently
  /// blocking
  uint64_t readCount() const noexcept {
    return popTicket_.load(std::memory_order_acquire);
  }
  /// Enqueues a T constructed from args, blocking until space is
  /// available.  Note that this method signature allows enqueue via
  /// move, if args is a T rvalue, via copy, if args is a T lvalue, or
  /// via emplacement if args is an initializer list that can be passed
  /// to a T constructor.
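  ///
  /// A sketch of the three forms (Widget is a hypothetical element type):
  ///
  ///   folly::MPMCQueue<Widget> q(10);
  ///   Widget w;
  ///   q.blockingWrite(w);              // copy
  ///   q.blockingWrite(std::move(w));   // move
  ///   q.blockingWrite(1, 2.0);         // emplace a Widget(1, 2.0) in place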
  template <typename ...Args>
  void blockingWrite(Args&&... args) noexcept {
    enqueueWithTicket(pushTicket_++, std::forward<Args>(args)...);
  }
  /// If an item can be enqueued with no blocking, does so and returns
  /// true, otherwise returns false.  This method is similar to
  /// writeIfNotFull, but if you don't have a specific need for that
  /// method you should use this one.
  ///
  /// One of the common usages of this method is to enqueue via the
  /// move constructor, something like q.write(std::move(x)).  If write
  /// returns false because the queue is full then x has not actually been
  /// consumed, which looks strange.  To understand why it is actually okay
  /// to use x afterward, remember that std::move is just a typecast that
  /// provides an rvalue reference that enables use of a move constructor
  /// or operator.  std::move doesn't actually move anything.  It could
  /// more accurately be called std::rvalue_cast or std::move_permission.
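  ///
  /// A sketch of the failed-write case (Widget and makeWidget are
  /// hypothetical):
  ///
  ///   Widget w = makeWidget();
  ///   if (!q.write(std::move(w))) {
  ///     // the queue was full; the move constructor never ran, so w still
  ///     // holds its original value and can be retried or used elsewhere
  ///   }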
  template <typename ...Args>
  bool write(Args&&... args) noexcept {
    uint64_t ticket;
    if (tryObtainReadyPushTicket(ticket)) {
      // we have pre-validated that the ticket won't block
      enqueueWithTicket(ticket, std::forward<Args>(args)...);
      return true;
    } else {
      return false;
    }
  }
  /// If the queue is not full, enqueues and returns true, otherwise
  /// returns false.  Unlike write this method can be blocked by another
  /// thread, specifically a read that has linearized (been assigned
  /// a ticket) but not yet completed.  If you don't really need this
  /// function you should probably use write.
  ///
  /// MPMCQueue isn't lock-free, so just because a read operation has
  /// linearized (and isFull is false) doesn't mean that space has been
  /// made available for another write.  In this situation write will
  /// return false, but writeIfNotFull will wait for the dequeue to finish.
  /// This method is required if you are composing queues and managing
  /// your own wakeup, because it guarantees that after every successful
  /// write a readIfNotEmpty will succeed.
  template <typename ...Args>
  bool writeIfNotFull(Args&&... args) noexcept {
    uint64_t ticket;
    if (tryObtainPromisedPushTicket(ticket)) {
      // some other thread is already dequeuing the slot into which we
      // are going to enqueue, but we might have to wait for them to finish
      enqueueWithTicket(ticket, std::forward<Args>(args)...);
      return true;
    } else {
      return false;
    }
  }
  /// Moves a dequeued element onto elem, blocking until an element
  /// is available
  void blockingRead(T& elem) noexcept {
    dequeueWithTicket(popTicket_++, elem);
  }
  /// If an item can be dequeued with no blocking, does so and returns
  /// true, otherwise returns false.
  bool read(T& elem) noexcept {
    uint64_t ticket;
    if (tryObtainReadyPopTicket(ticket)) {
      // the ticket has been pre-validated to not block
      dequeueWithTicket(ticket, elem);
      return true;
    } else {
      return false;
    }
  }
  /// If the queue is not empty, dequeues and returns true, otherwise
  /// returns false.  If the matching write is still in progress then this
  /// method may block waiting for it.  If you don't rely on being able
  /// to dequeue (such as by counting completed writes) then you should
  /// prefer read.
  bool readIfNotEmpty(T& elem) noexcept {
    uint64_t ticket;
    if (tryObtainPromisedPopTicket(ticket)) {
      // the matching enqueue already has a ticket, but might not be done
      dequeueWithTicket(ticket, elem);
      return true;
    } else {
      return false;
    }
  }
 private:
  enum {
    /// Once every kAdaptationFreq we will spin longer, to try to estimate
    /// the proper spin backoff
    kAdaptationFreq = 128,

    /// To avoid false sharing in slots_ with neighboring memory
    /// allocations, we pad it with this many SingleElementQueue-s at
    /// each end
    kSlotPadding = (detail::CacheLocality::kFalseSharingRange - 1)
        / sizeof(detail::SingleElementQueue<T,Atom>) + 1
  };
  /// The maximum number of items in the queue at once
  size_t FOLLY_ALIGN_TO_AVOID_FALSE_SHARING capacity_;

  /// An array of capacity_ SingleElementQueue-s, each of which holds
  /// either 0 or 1 item.  We over-allocate by 2 * kSlotPadding and don't
  /// touch the slots at either end, to avoid false sharing
  detail::SingleElementQueue<T,Atom>* slots_;

  /// The number of slots_ indices that we advance for each ticket, to
  /// avoid false sharing.  Ideally slots_[i] and slots_[i + stride_]
  /// aren't on the same cache line
  int stride_;

  /// Enqueuers get tickets from here
  Atom<uint64_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushTicket_;

  /// Dequeuers get tickets from here
  Atom<uint64_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING popTicket_;

  /// This is how many times we will spin before using FUTEX_WAIT when
  /// the queue is full on enqueue, adaptively computed by occasionally
  /// spinning for longer and smoothing with an exponential moving average
  Atom<uint32_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushSpinCutoff_;

  /// The adaptive spin cutoff when the queue is empty on dequeue
  Atom<uint32_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING popSpinCutoff_;

  /// Alignment doesn't prevent false sharing at the end of the struct,
  /// so fill out the last cache line
  char padding_[detail::CacheLocality::kFalseSharingRange -
                sizeof(Atom<uint32_t>)];
  /// We assign tickets in increasing order, but we don't want to
  /// access neighboring elements of slots_ because that will lead to
  /// false sharing (multiple cores accessing the same cache line even
  /// though they aren't accessing the same bytes in that cache line).
  /// To avoid this we advance by stride slots per ticket.
  ///
  /// We need gcd(capacity, stride) to be 1 so that we will use all
  /// of the slots.  We ensure this by only considering prime strides,
  /// which either have no common divisors with capacity or else have
  /// a zero remainder after dividing by capacity.  That is sufficient
  /// to guarantee correctness, but we also want to actually spread the
  /// accesses away from each other to avoid false sharing (consider a
  /// stride of 7 with a capacity of 8).  To that end we try a few
  /// candidate strides, taking care to observe that advancing by -1 is
  /// as bad as advancing by 1 when it comes to false sharing.
  ///
  /// The simple way to avoid false sharing would be to pad each
  /// SingleElementQueue, but since we have capacity_ of them that could
  /// waste a lot of space.
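  ///
  /// As a worked example (illustration only): with capacity 8, a stride
  /// of 3 maps tickets 0, 1, 2, ... to slots 0, 3, 6, 1, 4, 7, 2, 5 --
  /// every slot is used and consecutive tickets stay far apart.  A stride
  /// of 7 also uses every slot, but visits 0, 7, 6, 5, 4, 3, 2, 1, i.e.
  /// adjacent slots in reverse order, which is just as bad for false
  /// sharing as a stride of 1.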
  static int computeStride(size_t capacity) noexcept {
    static const int smallPrimes[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23 };

    int bestStride = 1;
    size_t bestSep = 1;
    for (int stride : smallPrimes) {
      if ((stride % capacity) == 0 || (capacity % stride) == 0) {
        // this stride wouldn't spread tickets across all of the slots
        continue;
      }
      size_t sep = stride % capacity;
      sep = std::min(sep, capacity - sep);
      if (sep > bestSep) {
        // prefer the stride that leaves the most distance between
        // consecutive tickets
        bestStride = stride;
        bestSep = sep;
      }
    }
    return bestStride;
  }
  /// Returns the index into slots_ that should be used when enqueuing or
  /// dequeuing with the specified ticket
  size_t idx(uint64_t ticket) noexcept {
    return ((ticket * stride_) % capacity_) + kSlotPadding;
  }

  /// Maps an enqueue or dequeue ticket to the turn that should be used at
  /// the corresponding SingleElementQueue
  uint32_t turn(uint64_t ticket) noexcept {
    return ticket / capacity_;
  }
  /// Tries to obtain a push ticket for which SingleElementQueue::enqueue
  /// won't block.  Returns true on immediate success, false on immediate
  /// failure.
  bool tryObtainReadyPushTicket(uint64_t& rv) noexcept {
    auto ticket = pushTicket_.load(std::memory_order_acquire); // A
    while (true) {
      if (!slots_[idx(ticket)].mayEnqueue(turn(ticket))) {
        // if we call enqueue(ticket, ...) on the SingleElementQueue
        // right now it would block, but this might no longer be the next
        // ticket.  We can increase the chance of tryEnqueue success under
        // contention (without blocking) by rechecking the ticket dispenser
        // and retrying
        auto prev = ticket;
        ticket = pushTicket_.load(std::memory_order_acquire); // B
        if (prev == ticket) {
          // mayEnqueue was bracketed by two reads (A or prev B or prev
          // failing CAS to B), so we are definitely unable to enqueue
          return false;
        }
      } else {
        // we will bracket the mayEnqueue check with a read (A or prev B
        // or prev failing CAS) and the following CAS.  If the CAS fails
        // it will effect a load of pushTicket_
        if (pushTicket_.compare_exchange_strong(ticket, ticket + 1)) {
          rv = ticket;
          return true;
        }
      }
    }
  }
  /// Tries to obtain a push ticket which can be satisfied if all
  /// in-progress pops complete.  This function does not block, but
  /// blocking may be required when using the returned ticket if some
  /// other thread's pop is still in progress (ticket has been granted but
  /// pop has not yet completed).
  bool tryObtainPromisedPushTicket(uint64_t& rv) noexcept {
    auto numPushes = pushTicket_.load(std::memory_order_acquire); // A
    while (true) {
      auto numPops = popTicket_.load(std::memory_order_acquire); // B
      // n will be negative if pops are pending
      int64_t n = numPushes - numPops;
      if (n >= static_cast<ssize_t>(capacity_)) {
        // Full, linearize at B.  We don't need to recheck the read we
        // performed at A, because if numPushes was stale at B then the
        // real numPushes value is even worse
        return false;
      }
      if (pushTicket_.compare_exchange_strong(numPushes, numPushes + 1)) {
        rv = numPushes;
        return true;
      }
    }
  }
  /// Tries to obtain a pop ticket for which SingleElementQueue::dequeue
  /// won't block.  Returns true on immediate success, false on immediate
  /// failure.
  bool tryObtainReadyPopTicket(uint64_t& rv) noexcept {
    auto ticket = popTicket_.load(std::memory_order_acquire);
    while (true) {
      if (!slots_[idx(ticket)].mayDequeue(turn(ticket))) {
        // analogous to tryObtainReadyPushTicket: recheck the dispenser
        // before giving up
        auto prev = ticket;
        ticket = popTicket_.load(std::memory_order_acquire);
        if (prev == ticket) {
          return false;
        }
      } else {
        if (popTicket_.compare_exchange_strong(ticket, ticket + 1)) {
          rv = ticket;
          return true;
        }
      }
    }
  }
  /// Similar to tryObtainReadyPopTicket, but returns a pop ticket whose
  /// corresponding push ticket has already been handed out, rather than
  /// returning one whose corresponding push ticket has already been
  /// completed.  This means that there is a possibility that the caller
  /// will block when using the ticket, but it allows the user to rely on
  /// the fact that if enqueue has succeeded, tryObtainPromisedPopTicket
  /// will return true.  The "try" part of this is that we won't have
  /// to block waiting for someone to call enqueue, although we might
  /// have to block waiting for them to finish executing code inside the
  /// MPMCQueue itself.
  bool tryObtainPromisedPopTicket(uint64_t& rv) noexcept {
    auto numPops = popTicket_.load(std::memory_order_acquire); // A
    while (true) {
      auto numPushes = pushTicket_.load(std::memory_order_acquire); // B
      if (numPops >= numPushes) {
        // Empty, or empty with pending pops.  Linearize at B.  We don't
        // need to recheck the read we performed at A, because if numPops
        // is stale then the fresh value is larger and the >= is still true
        return false;
      }
      if (popTicket_.compare_exchange_strong(numPops, numPops + 1)) {
        rv = numPops;
        return true;
      }
    }
  }
  // Given a ticket, constructs an enqueued item using args
  template <typename ...Args>
  void enqueueWithTicket(uint64_t ticket, Args&&... args) noexcept {
    slots_[idx(ticket)].enqueue(turn(ticket),
                                pushSpinCutoff_,
                                (ticket % kAdaptationFreq) == 0,
                                std::forward<Args>(args)...);
  }

  // Given a ticket, dequeues the corresponding element
  void dequeueWithTicket(uint64_t ticket, T& elem) noexcept {
    slots_[idx(ticket)].dequeue(turn(ticket),
                                popSpinCutoff_,
                                (ticket % kAdaptationFreq) == 0,
                                elem);
  }
};
/// A TurnSequencer allows threads to order their execution according to
/// a monotonically increasing (with wraparound) "turn" value.  The two
/// operations provided are to wait for turn T, and to move to the next
/// turn.  Every thread that is waiting for T must have arrived before
/// that turn is marked completed (for MPMCQueue only one thread waits
/// for any particular turn, so this is trivially true).
///
/// TurnSequencer's state_ holds 26 bits of the current turn (shifted
/// left by 6), along with a 6 bit saturating value that records the
/// maximum waiter minus the current turn.  Wraparound of the turn space
/// is expected and handled.  This allows us to atomically adjust the
/// number of outstanding waiters when we perform a FUTEX_WAKE operation.
/// Compare this strategy to sem_t's separate num_waiters field, which
/// isn't decremented until after the waiting thread gets scheduled,
/// during which time more enqueues might have occurred and made pointless
/// FUTEX_WAKE calls.
///
/// TurnSequencer uses futex() directly.  It is optimized for the
/// case that the highest awaited turn is 32 or less higher than the
/// current turn.  We use the FUTEX_WAIT_BITSET variant, which lets
/// us embed 32 separate wakeup channels in a single futex.  See
/// http://locklessinc.com/articles/futex_cheat_sheet for a description.
///
/// We only need to keep exact track of the delta between the current
/// turn and the maximum waiter for the 32 turns that follow the current
/// one, because waiters at turn t+32 will be awoken at turn t.  At that
/// point they can then adjust the delta using the higher base.  Since we
/// need to encode waiter deltas of 0 to 32 inclusive, we use 6 bits.
/// We actually store waiter deltas up to 63, since that might reduce
/// the number of CAS operations a tiny bit.
///
/// To avoid some futex() calls entirely, TurnSequencer uses an adaptive
/// spin cutoff before waiting.  The overheads (and convergence rate)
/// of separately tracking the spin cutoff for each TurnSequencer would
/// be prohibitive, so the actual storage is passed in as a parameter and
/// updated atomically.  This also lets the caller use different adaptive
/// cutoffs for different operations (read versus write, for example).
/// To avoid contention, the spin cutoff is only updated when requested
/// by the caller.
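///
/// As a worked example (illustration only, with kTurnShift == 6): if the
/// current turn is 5 and the highest waiter is waiting for turn 7, then
/// state_ holds (5 << 6) | (7 - 5) == 322.  completeTurn(5) advances this
/// to (6 << 6) | 1 and, because the stored waiter delta was non-zero,
/// issues a FUTEX_WAKE on turn 6's wakeup channel.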
template <template<typename> class Atom>
struct TurnSequencer {
  explicit TurnSequencer(const uint32_t firstTurn = 0) noexcept
      : state_(encode(firstTurn << kTurnShift, 0))
  {}
  /// Returns true iff a call to waitForTurn(turn, ...) won't block
  bool isTurn(const uint32_t turn) const noexcept {
    auto state = state_.load(std::memory_order_acquire);
    return decodeCurrentSturn(state) == (turn << kTurnShift);
  }
  // Internally we always work with shifted turn values, which makes the
  // truncation and wraparound work correctly.  This leaves us bits at
  // the bottom to store the number of waiters.  We call shifted turns
  // "sturns" inside this class.
  /// Blocks the current thread until turn has arrived.  If
  /// updateSpinCutoff is true then this will spin for up to kMaxSpins tries
  /// before blocking and will adjust spinCutoff based on the results,
  /// otherwise it will spin for at most spinCutoff spins.
  void waitForTurn(const uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff) noexcept {
    uint32_t prevThresh = spinCutoff.load(std::memory_order_relaxed);
    const uint32_t effectiveSpinCutoff =
        updateSpinCutoff || prevThresh == 0 ? kMaxSpins : prevThresh;
    uint32_t tries;

    const uint32_t sturn = turn << kTurnShift;
    for (tries = 0; ; ++tries) {
      uint32_t state = state_.load(std::memory_order_acquire);
      uint32_t current_sturn = decodeCurrentSturn(state);
      if (current_sturn == sturn) {
        break;
      }

      // wrap-safe version of assert(current_sturn < sturn)
      assert(sturn - current_sturn < std::numeric_limits<uint32_t>::max() / 2);

      // the first effectiveSpinCutoff tries are spins, after that we will
      // record ourself as a waiter and block with futexWait
      if (tries < effectiveSpinCutoff) {
        asm volatile ("pause");
        continue;
      }

      uint32_t current_max_waiter_delta = decodeMaxWaitersDelta(state);
      uint32_t our_waiter_delta = (sturn - current_sturn) >> kTurnShift;
      uint32_t new_state;
      if (our_waiter_delta <= current_max_waiter_delta) {
        // state already records us as waiters, probably because this
        // isn't our first time around this loop
        new_state = state;
      } else {
        new_state = encode(current_sturn, our_waiter_delta);
        if (state != new_state &&
            !state_.compare_exchange_strong(state, new_state)) {
          continue;
        }
      }
      state_.futexWait(new_state, futexChannel(turn));
    }

    if (updateSpinCutoff || prevThresh == 0) {
      // if we hit kMaxSpins then spinning was pointless, so the right
      // spinCutoff is kMinSpins
      uint32_t target;
      if (tries >= kMaxSpins) {
        target = kMinSpins;
      } else {
        // to account for variations, we allow ourself to spin 2*N when
        // we think that N is actually required in order to succeed
        target = std::min<uint32_t>(kMaxSpins,
                                    std::max<uint32_t>(kMinSpins, tries * 2));
      }

      if (prevThresh == 0) {
        // bootstrap
        spinCutoff.store(target);
      } else {
        // try once, keep moving if CAS fails.  Exponential moving average.
        // Be careful that the quantity we add to prevThresh is signed.
        spinCutoff.compare_exchange_weak(
            prevThresh, prevThresh + int(target - prevThresh) / 8);
      }
    }
  }
  /// Unblocks a thread running waitForTurn(turn + 1)
  void completeTurn(const uint32_t turn) noexcept {
    uint32_t state = state_.load(std::memory_order_acquire);
    while (true) {
      assert(state == encode(turn << kTurnShift, decodeMaxWaitersDelta(state)));
      uint32_t max_waiter_delta = decodeMaxWaitersDelta(state);
      uint32_t new_state = encode(
          (turn + 1) << kTurnShift,
          max_waiter_delta == 0 ? 0 : max_waiter_delta - 1);
      if (state_.compare_exchange_strong(state, new_state)) {
        if (max_waiter_delta != 0) {
          state_.futexWake(std::numeric_limits<int>::max(),
                           futexChannel(turn + 1));
        }
        break;
      }
      // failing compare_exchange_strong updates first arg to the value
      // that caused the failure, so no need to reread state_
    }
  }
  /// Returns the least significant byte of the current uncompleted
  /// turn.  The full 32 bit turn cannot be recovered.
  uint8_t uncompletedTurnLSB() const noexcept {
    return state_.load(std::memory_order_acquire) >> kTurnShift;
  }
 private:
  enum {
    /// kTurnShift counts the bits that are stolen to record the delta
    /// between the current turn and the maximum waiter.  It needs to be big
    /// enough to record wait deltas of 0 to 32 inclusive.  Waiters more
    /// than 32 in the future will be woken up 32*n turns early (since
    /// their BITSET will hit) and will adjust the waiter count again.
    /// We go a bit beyond and let the waiter count go up to 63, which
    /// is free and might save us a few CAS
    kTurnShift = 6,
    kWaitersMask = (1 << kTurnShift) - 1,
    /// The minimum spin count that we will adaptively select
    kMinSpins = 20,

    /// The maximum spin count that we will adaptively select, and the
    /// spin count that will be used when probing to get a new data point
    /// for the adaptation
    kMaxSpins = 2000,
  };
  /// This holds both the current turn, and the highest waiting turn,
  /// stored as (current_turn << 6) | min(63, max(waited_turn - current_turn))
  Futex<Atom> state_;
  /// Returns the bitmask to pass to futexWait or futexWake when
  /// communicating about the specified turn
  int futexChannel(uint32_t turn) const noexcept {
    return 1 << (turn & 31);
  }

  uint32_t decodeCurrentSturn(uint32_t state) const noexcept {
    return state & ~kWaitersMask;
  }

  uint32_t decodeMaxWaitersDelta(uint32_t state) const noexcept {
    return state & kWaitersMask;
  }

  uint32_t encode(uint32_t currentSturn, uint32_t maxWaiterD) const noexcept {
    return currentSturn | std::min(uint32_t{ kWaitersMask }, maxWaiterD);
  }
};
/// SingleElementQueue implements a blocking queue that holds at most one
/// item, and that requires its users to assign incrementing identifiers
/// (turns) to each enqueue and dequeue operation.  Note that the turns
/// used by SingleElementQueue are doubled inside the TurnSequencer
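/// (the enqueue with turn t waits for sequencer turn 2*t and the matching
/// dequeue waits for sequencer turn 2*t + 1), so each slot strictly
/// alternates between accepting a push and accepting a pop.  For example
/// (illustration only; seq is a SingleElementQueue and the elided
/// arguments are the spin-cutoff parameters):
///
///   seq.enqueue(0, ...);   // runs at sequencer turn 0
///   seq.dequeue(0, ...);   // runs at sequencer turn 1
///   seq.enqueue(1, ...);   // runs at sequencer turn 2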
template <typename T, template <typename> class Atom>
struct SingleElementQueue {

  ~SingleElementQueue() noexcept {
    if ((sequencer_.uncompletedTurnLSB() & 1) == 1) {
      // we are pending a dequeue, so we have a constructed item
      destroyContents();
    }
  }
  /// enqueue using in-place noexcept construction
  template <typename ...Args,
            typename = typename std::enable_if<
                std::is_nothrow_constructible<T,Args...>::value>::type>
  void enqueue(const uint32_t turn,
               Atom<uint32_t>& spinCutoff,
               const bool updateSpinCutoff,
               Args&&... args) noexcept {
    sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff);
    new (&contents_) T(std::forward<Args>(args)...);
    sequencer_.completeTurn(turn * 2);
  }
  /// enqueue using move construction, either real (if
  /// is_nothrow_move_constructible) or simulated using relocation and
  /// default construction (if IsRelocatable and has_nothrow_constructor)
  template <typename = typename std::enable_if<
                (folly::IsRelocatable<T>::value &&
                 boost::has_nothrow_constructor<T>::value) ||
                std::is_nothrow_constructible<T,T&&>::value>::type>
  void enqueue(const uint32_t turn,
               Atom<uint32_t>& spinCutoff,
               const bool updateSpinCutoff,
               T&& goner) noexcept {
    enqueueImpl(
        turn, spinCutoff, updateSpinCutoff, std::move(goner),
        typename std::conditional<std::is_nothrow_constructible<T,T&&>::value,
                                  ImplByMove, ImplByRelocation>::type());
  }
  bool mayEnqueue(const uint32_t turn) const noexcept {
    return sequencer_.isTurn(turn * 2);
  }

  void dequeue(uint32_t turn,
               Atom<uint32_t>& spinCutoff,
               const bool updateSpinCutoff,
               T& elem) noexcept {
    dequeueImpl(turn, spinCutoff, updateSpinCutoff, elem,
                typename std::conditional<folly::IsRelocatable<T>::value,
                                          ImplByRelocation,
                                          ImplByMove>::type());
  }
  bool mayDequeue(const uint32_t turn) const noexcept {
    return sequencer_.isTurn(turn * 2 + 1);
  }

 private:
  /// Storage for a T constructed with placement new
  typename std::aligned_storage<sizeof(T),alignof(T)>::type contents_;

  /// Even turns are pushes, odd turns are pops
  TurnSequencer<Atom> sequencer_;
  T* ptr() noexcept {
    return static_cast<T*>(static_cast<void*>(&contents_));
  }

  void destroyContents() noexcept {
    try {
      ptr()->~T();
    } catch (...) {
      // g++ doesn't seem to have std::is_nothrow_destructible yet
    }
#ifndef NDEBUG
    memset(&contents_, 'Q', sizeof(T));
#endif
  }
  /// Tag classes for dispatching to enqueue/dequeue implementation.
  struct ImplByRelocation {};
  struct ImplByMove {};
  /// enqueue using nothrow move construction.
  void enqueueImpl(const uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T&& goner,
                   ImplByMove) noexcept {
    sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff);
    new (&contents_) T(std::move(goner));
    sequencer_.completeTurn(turn * 2);
  }
  /// enqueue by simulating a nothrow move with relocation (memcpy),
  /// followed by default construction of the moved-from source.
  void enqueueImpl(const uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T&& goner,
                   ImplByRelocation) noexcept {
    sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff);
    memcpy(&contents_, &goner, sizeof(T));
    sequencer_.completeTurn(turn * 2);
    new (&goner) T();
  }
  /// dequeue by destructing followed by relocation.  This version is
  /// preferred, because as much work as possible can be done before waiting.
  void dequeueImpl(uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T& elem,
                   ImplByRelocation) noexcept {
    try {
      elem.~T();
    } catch (...) {
      // unlikely, but if we don't complete our turn the queue will die
    }
    sequencer_.waitForTurn(turn * 2 + 1, spinCutoff, updateSpinCutoff);
    memcpy(&elem, &contents_, sizeof(T));
    sequencer_.completeTurn(turn * 2 + 1);
  }
  /// dequeue by nothrow move assignment.
  void dequeueImpl(uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T& elem,
                   ImplByMove) noexcept {
    sequencer_.waitForTurn(turn * 2 + 1, spinCutoff, updateSpinCutoff);
    elem = std::move(*ptr());
    destroyContents();
    sequencer_.completeTurn(turn * 2 + 1);
  }
};
} // namespace detail