/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <algorithm>
#include <atomic>
#include <cassert>
#include <chrono>
#include <cstring>
#include <type_traits>

#include <boost/noncopyable.hpp>

#include <folly/Likely.h>
#include <folly/Traits.h>
#include <folly/concurrency/CacheLocality.h>
#include <folly/detail/TurnSequencer.h>
#include <folly/portability/Asm.h>
#include <folly/portability/Unistd.h>
namespace folly {

namespace detail {

template <typename T, template <typename> class Atom>
struct SingleElementQueue;

template <typename T> class MPMCPipelineStageImpl;

/// MPMCQueue base CRTP template
template <typename> class MPMCQueueBase;

} // namespace detail
/// MPMCQueue<T> is a high-performance bounded concurrent queue that
/// supports multiple producers, multiple consumers, and optional blocking.
/// The queue has a fixed capacity, for which all memory will be allocated
/// up front. The bulk of the work of enqueuing and dequeuing can be
/// performed in parallel.
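///
/// A minimal usage sketch (illustrative only, not prescribed by this
/// header; assumes an int payload):
///
///   folly::MPMCQueue<int> q(64);   // capacity fixed at 64
///   q.blockingWrite(1);            // blocks while the queue is full
///   int v;
///   q.blockingRead(v);             // blocks while the queue is empty
///   bool ok = q.write(2);          // non-blocking; false if full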
///
/// MPMCQueue is linearizable. That means that if a call to write(A)
/// returns before a call to write(B) begins, then A will definitely end up
/// in the queue before B, and if a call to read(X) returns before a call
/// to read(Y) is started, then X will be something from earlier in the
/// queue than Y. This also means that if a read call returns a value, you
/// can be sure that all previous elements of the queue have been assigned
/// a reader (that reader might not yet have returned, but it exists).
///
/// The underlying implementation uses a ticket dispenser for the head and
/// the tail, spreading accesses across N single-element queues to produce
/// a queue with capacity N. The ticket dispensers use atomic increment,
/// which is more robust to contention than a CAS loop. Each of the
/// single-element queues uses its own CAS to serialize access, with an
/// adaptive spin cutoff. When spinning fails on a single-element queue
/// it uses futex()'s _BITSET operations to reduce unnecessary wakeups
/// even if multiple waiters are present on an individual queue (such as
/// when the MPMCQueue's capacity is smaller than the number of enqueuers
/// or dequeuers).
///
/// In benchmarks (contained in tao/queues/ConcurrentQueueTests)
/// it handles 1 to 1, 1 to N, N to 1, and N to M thread counts better
/// than any of the alternatives present in fbcode, for both small (~10)
/// and large capacities. In these benchmarks it is also faster than
/// tbb::concurrent_bounded_queue for all configurations. When there are
/// many more threads than cores, MPMCQueue is _much_ faster than the tbb
/// queue because it uses futex() to block and unblock waiting threads,
/// rather than spinning with sched_yield.
///
/// NOEXCEPT INTERACTION: tl;dr: if it compiles, you're fine. Ticket-based
/// queues separate the assignment of queue positions from the actual
/// construction of the in-queue elements, which means that the T
/// constructor used during enqueue must not throw an exception. This is
/// enforced at compile time using type traits, which requires that T be
/// adorned with accurate noexcept information. If your type does not
/// use noexcept, you will have to wrap it in something that provides
/// the guarantee. We provide an alternate safe implementation for types
/// that don't use noexcept but that are marked folly::IsRelocatable
/// and std::is_nothrow_constructible, which is common for folly types.
/// In particular, if you can declare FOLLY_ASSUME_FBVECTOR_COMPATIBLE
/// then your type can be put in MPMCQueue.
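///
/// For illustration (a sketch, not prescribed by this header): a type
/// whose move constructor is not noexcept can still be queued indirectly
/// behind std::unique_ptr, whose move constructor is noexcept:
///
///   struct Widget { Widget() {} Widget(Widget&&); };  // move not noexcept
///   folly::MPMCQueue<std::unique_ptr<Widget>> q(10);
///   q.blockingWrite(std::make_unique<Widget>());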
///
/// If you have a pool of N queue consumers that you want to shut down
/// after the queue has drained, one way is to enqueue N sentinel values
/// to the queue. If the producer doesn't know how many consumers there
/// are you can enqueue one sentinel and then have each consumer requeue
/// two sentinels after it receives it (by requeuing 2 the shutdown can
/// complete in O(log P) time instead of O(P)). A sketch of this scheme
/// follows.
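///
/// In this sketch, Task, run(), and the use of nullptr as the sentinel
/// are illustrative assumptions:
///
///   folly::MPMCQueue<Task*> q(1024);
///   // producer, once it is done:
///   q.blockingWrite(nullptr);
///   // each of the P consumers:
///   for (Task* t;;) {
///     q.blockingRead(t);
///     if (t == nullptr) {
///       q.blockingWrite(nullptr);  // requeue two sentinels so the
///       q.blockingWrite(nullptr);  // shutdown fans out in O(log P)
///       break;
///     }
///     run(t);
///   }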
template <typename T,
          template <typename> class Atom = std::atomic,
          bool Dynamic = false>
class MPMCQueue : public detail::MPMCQueueBase<MPMCQueue<T,Atom,Dynamic>> {
  friend class detail::MPMCPipelineStageImpl<T>;
  using Slot = detail::SingleElementQueue<T,Atom>;

 public:
  explicit MPMCQueue(size_t queueCapacity)
    : detail::MPMCQueueBase<MPMCQueue<T,Atom,Dynamic>>(queueCapacity)
  {
    this->stride_ = this->computeStride(queueCapacity);
    this->slots_ = new Slot[queueCapacity + 2 * this->kSlotPadding];
  }

  MPMCQueue() noexcept { }
};
/// The dynamic version of MPMCQueue allows dynamic expansion of queue
/// capacity, such that a queue may start with a smaller capacity than
/// specified and expand if needed up to the specified capacity.
/// Shrinking is not supported at this point.
///
/// Users may optionally specify the initial capacity and the
/// expansion multiplier. Otherwise default values are used.
///
/// Operations on the dynamic version have the same semantics as for
/// the default fixed-size version, except that the total queue
/// capacity can temporarily exceed the specified capacity when there
/// are lagging consumers that haven't yet consumed all the elements
/// in closed arrays. Users should not rely on the capacity of dynamic
/// queues for synchronization, e.g., they should not expect that a
/// thread will definitely block on a call to blockingWrite() when the
/// queue size is known to be equal to its capacity.
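///
/// Construction sketch (values illustrative): a queue that may hold up
/// to 1000 elements, starting at capacity 100 and multiplying capacity
/// by 4 on each expansion:
///
///   folly::MPMCQueue<int, std::atomic, true> q(1000, 100, 4);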
///
/// The design uses a seqlock to enforce mutual exclusion among
/// expansion attempts. Regular operations read up-to-date queue
/// information (slots array, capacity, stride) inside read-only
/// seqlock sections, which are unimpeded when no expansion is in
/// progress.
///
/// An expansion computes a new capacity, allocates a new slots array,
/// and updates stride. No information needs to be copied from the
/// current slots array to the new one. When this happens, new slots
/// will not have sequence numbers that match ticket numbers. The
/// expansion needs to compute a ticket offset such that operations
/// that use new arrays can adjust the calculations of slot indexes
/// and sequence numbers that take into account that the new slots
/// start with sequence numbers of zero. The current ticket offset is
/// packed with the seqlock in an atomic 64-bit integer. The initial
/// value of that packed integer is zero.
///
/// Lagging write and read operations with tickets lower than the
/// ticket offset of the current slots array (i.e., the minimum ticket
/// number that can be served by the current array) must use earlier
/// closed arrays instead of the current one. Information about closed
/// slots arrays (array address, capacity, stride, and offset) is
/// maintained in a logarithmic-sized structure. Each entry in that
/// structure never needs to be changed once set. The number of closed
/// arrays is half the value of the seqlock (when unlocked).
///
/// The acquisition of the seqlock to perform an expansion does not
/// prevent the issuing of new push and pop tickets concurrently. The
/// expansion must set the new ticket offset to a value that couldn't
/// have been issued to an operation that has already gone through a
/// seqlock read-only section (and hence obtained information for
/// older closed arrays).
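///
/// A worked example of the packed state, assuming the layout used by the
/// helpers below (ticket offset in the high bits, seqlock bit and closed
/// array count in the low kSeqlockBits):
///
///   state = (offset << kSeqlockBits) | (2 * numClosedArrays + lockBit)
///
/// so after two expansions with a current ticket offset of 48 and the
/// seqlock unlocked, getOffset(state) == 48 and getNumClosed(state) == 2.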
///
/// The dynamic version is a partial specialization of MPMCQueue with
/// Dynamic == true.
template <typename T, template <typename> class Atom>
class MPMCQueue<T,Atom,true> :
      public detail::MPMCQueueBase<MPMCQueue<T,Atom,true>> {
  friend class detail::MPMCQueueBase<MPMCQueue<T,Atom,true>>;
  using Slot = detail::SingleElementQueue<T,Atom>;

  struct ClosedArray {
    uint64_t offset_ {0};
    Slot* slots_ {nullptr};
    size_t capacity_ {0};
    int stride_ {0};
  };

 public:
  explicit MPMCQueue(size_t queueCapacity)
    : detail::MPMCQueueBase<MPMCQueue<T,Atom,true>>(queueCapacity)
  {
    size_t cap = std::min<size_t>(kDefaultMinDynamicCapacity, queueCapacity);
    initQueue(cap, kDefaultExpansionMultiplier);
  }

  explicit MPMCQueue(size_t queueCapacity,
                     size_t minCapacity,
                     size_t expansionMultiplier)
    : detail::MPMCQueueBase<MPMCQueue<T,Atom,true>>(queueCapacity)
  {
    minCapacity = std::max<size_t>(1, minCapacity);
    size_t cap = std::min<size_t>(minCapacity, queueCapacity);
    expansionMultiplier = std::max<size_t>(2, expansionMultiplier);
    initQueue(cap, expansionMultiplier);
  }

  MPMCQueue() noexcept {
    dmult_ = 0;
    closed_ = nullptr;
  }
  MPMCQueue(MPMCQueue<T,Atom,true>&& rhs) noexcept {
    this->capacity_ = rhs.capacity_;
    this->slots_ = rhs.slots_;
    this->stride_ = rhs.stride_;
    this->dstate_.store(rhs.dstate_.load(std::memory_order_relaxed),
                        std::memory_order_relaxed);
    this->dcapacity_.store(rhs.dcapacity_.load(std::memory_order_relaxed),
                           std::memory_order_relaxed);
    this->pushTicket_.store(rhs.pushTicket_.load(std::memory_order_relaxed),
                            std::memory_order_relaxed);
    this->popTicket_.store(rhs.popTicket_.load(std::memory_order_relaxed),
                           std::memory_order_relaxed);
    this->pushSpinCutoff_.store(
        rhs.pushSpinCutoff_.load(std::memory_order_relaxed),
        std::memory_order_relaxed);
    this->popSpinCutoff_.store(
        rhs.popSpinCutoff_.load(std::memory_order_relaxed),
        std::memory_order_relaxed);
    dmult_ = rhs.dmult_;
    closed_ = rhs.closed_;

    rhs.capacity_ = 0;
    rhs.slots_ = nullptr;
    rhs.stride_ = 0;
    rhs.dstate_.store(0, std::memory_order_relaxed);
    rhs.dcapacity_.store(0, std::memory_order_relaxed);
    rhs.pushTicket_.store(0, std::memory_order_relaxed);
    rhs.popTicket_.store(0, std::memory_order_relaxed);
    rhs.pushSpinCutoff_.store(0, std::memory_order_relaxed);
    rhs.popSpinCutoff_.store(0, std::memory_order_relaxed);
    rhs.dmult_ = 0;
    rhs.closed_ = nullptr;
  }

  MPMCQueue<T,Atom,true> const& operator= (MPMCQueue<T,Atom,true>&& rhs) {
    if (this != &rhs) {
      this->~MPMCQueue();
      new (this) MPMCQueue(std::move(rhs));
    }
    return *this;
  }
  ~MPMCQueue() {
    if (closed_ != nullptr) {
      for (int i = getNumClosed(this->dstate_.load()) - 1; i >= 0; --i) {
        delete[] closed_[i].slots_;
      }
      delete[] closed_;
    }
  }

  size_t allocatedCapacity() const noexcept {
    return this->dcapacity_.load(std::memory_order_relaxed);
  }
  template <typename ...Args>
  void blockingWrite(Args&&... args) noexcept {
    uint64_t ticket = this->pushTicket_++;
    Slot* slots;
    size_t cap;
    int stride;
    uint64_t state;
    uint64_t offset;
    for (;;) {
      while (UNLIKELY(!trySeqlockReadSection(state, slots, cap, stride))) {
        asm_volatile_pause();
      }
      maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride);
      if (LIKELY(slots[this->idx((ticket - offset), cap, stride)].mayEnqueue(
              this->turn(ticket - offset, cap)))) {
        // A slot is ready. Fast path. No need to expand.
        break;
      }
      auto head = this->popTicket_.load(std::memory_order_relaxed);
      auto avail = std::max(head, offset) + cap;
      if (ticket < avail) {
        // May block, but a pop is in progress. No need to expand.
        break;
      }
      // Try to expand, otherwise this operation may block
      // indefinitely awaiting a consumer to unblock it.
      if (!tryExpand(state, cap)) {
        // Can't expand. Block.
        break;
      }
      // Either this or another thread started an expansion. Get up-to-date
      // info.
    }
    this->enqueueWithTicketBase(ticket-offset, slots, cap, stride,
                                std::forward<Args>(args)...);
  }
  void blockingReadWithTicket(uint64_t& ticket, T& elem) noexcept {
    ticket = this->popTicket_++;
    Slot* slots;
    size_t cap;
    int stride;
    uint64_t state;
    uint64_t offset;
    while (UNLIKELY(!trySeqlockReadSection(state, slots, cap, stride))) {
      asm_volatile_pause();
    }
    // If there was an expansion after the corresponding push ticket
    // was issued, adjust accordingly
    maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride);
    this->dequeueWithTicketBase(ticket-offset, slots, cap, stride, elem);
  }
 private:
  enum {
    kSeqlockBits = 6,
    kDefaultMinDynamicCapacity = 16,
    kDefaultExpansionMultiplier = 8,
  };

  // Expansion multiplier for the dynamic slots array
  size_t dmult_;

  // Info about closed slots arrays for use by lagging operations
  ClosedArray* closed_;
  void initQueue(const size_t cap, const size_t mult) {
    this->stride_ = this->computeStride(cap);
    this->slots_ = new Slot[cap + 2 * this->kSlotPadding];
    this->dstate_.store(0);
    this->dcapacity_.store(cap);
    dmult_ = mult;
    size_t maxClosed = 0;
    for (size_t expanded = cap;
         expanded < this->capacity_;
         expanded *= mult) {
      ++maxClosed;
    }
    closed_ = (maxClosed > 0) ? new ClosedArray[maxClosed] : nullptr;
  }
  bool tryObtainReadyPushTicket(
      uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    uint64_t state;
    do {
      ticket = this->pushTicket_.load(std::memory_order_acquire); // A
      if (UNLIKELY(!trySeqlockReadSection(state, slots, cap, stride))) {
        asm_volatile_pause();
        continue;
      }
      // If there was an expansion with offset greater than this ticket,
      // adjust accordingly
      uint64_t offset;
      maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride);
      if (LIKELY(slots[this->idx((ticket - offset), cap, stride)].mayEnqueue(
              this->turn(ticket - offset, cap)))) {
        // A slot is ready.
        if (UNLIKELY(!this->pushTicket_.compare_exchange_strong(
                ticket, ticket + 1))) {
          continue;
        }
        // Validate that state is the same
        if (LIKELY(state == this->dstate_.load(std::memory_order_acquire))) {
          return true;
        }
        // Slow path - state changed - get up-to-date info for obtained ticket
        while (true) {
          state = this->dstate_.load(std::memory_order_acquire);
          if (trySeqlockReadSection(state, slots, cap, stride)) {
            break;
          }
          asm_volatile_pause();
        }
        maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride);
        return true;
      } else {
        // slow path - no ready ticket
        if (ticket != this->pushTicket_.load(std::memory_order_relaxed)) { // B
          // Ticket changed. Start over.
          continue;
        }
        auto head = this->popTicket_.load(std::memory_order_acquire);
        auto avail = std::max(head, offset) + cap;
        if (ticket < avail) {
          // a consumer is in the process of making the slot available
          // don't try to expand. Spin if capacity is not
          // exhausted. Otherwise return false.
          if (cap == this->capacity_) {
            return false;
          }
          asm_volatile_pause();
          continue;
        }
        // Likely to block. Try to expand.
        if (tryExpand(state, cap)) {
          // This or another thread started an expansion. Get up-to-date info.
          continue;
        }
        // No ready ticket and cannot expand
        return false;
      }
    } while (true);
  }
  bool tryObtainPromisedPushTicket(
      uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    uint64_t state;
    do {
      ticket = this->pushTicket_.load(std::memory_order_acquire);
      auto head = this->popTicket_.load(std::memory_order_acquire);
      if (UNLIKELY(!trySeqlockReadSection(state, slots, cap, stride))) {
        asm_volatile_pause();
        continue;
      }
      // If there was an expansion with offset greater than this ticket,
      // adjust accordingly
      uint64_t offset;
      maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride);
      auto avail = std::max(offset, head) + cap;
      if (UNLIKELY(ticket >= avail)) {
        if (tryExpand(state, cap)) {
          // Space may be available. Start over.
          continue;
        }
        return false;
      }
      if (UNLIKELY((!this->pushTicket_.compare_exchange_strong(
              ticket, ticket + 1)))) {
        continue;
      }
      // Validate that state is the same
      if (LIKELY(state == this->dstate_.load(std::memory_order_acquire))) {
        return true;
      }
      // Obtained ticket but info is out-of-date - Update info
      while (true) {
        state = this->dstate_.load(std::memory_order_acquire);
        if (trySeqlockReadSection(state, slots, cap, stride)) {
          break;
        }
        asm_volatile_pause();
      }
      maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride);
      return true;
    } while (true);
  }
  bool tryObtainReadyPopTicket(
      uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    uint64_t state;
    do {
      ticket = this->popTicket_.load(std::memory_order_relaxed);
      if (UNLIKELY(!trySeqlockReadSection(state, slots, cap, stride))) {
        asm_volatile_pause();
        continue;
      }
      // If there was an expansion after the corresponding push ticket
      // was issued, adjust accordingly
      uint64_t offset;
      maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride);
      if (UNLIKELY(!slots[this->idx((ticket - offset), cap, stride)].mayDequeue(
              this->turn(ticket - offset, cap)))) {
        return false;
      }
      if (UNLIKELY(
              !this->popTicket_.compare_exchange_strong(ticket, ticket + 1))) {
        continue;
      }
      // Validate that state is the same
      if (LIKELY(state == this->dstate_.load(std::memory_order_acquire))) {
        return true;
      }
      // Obtained ticket but info is out-of-date - Update info
      while (true) {
        state = this->dstate_.load(std::memory_order_acquire);
        if (trySeqlockReadSection(state, slots, cap, stride)) {
          break;
        }
        asm_volatile_pause();
      }
      maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride);
      return true;
    } while (true);
  }
  bool tryObtainPromisedPopTicket(
      uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    uint64_t state;
    do {
      ticket = this->popTicket_.load(std::memory_order_acquire);
      auto numPushes = this->pushTicket_.load(std::memory_order_acquire);
      if (UNLIKELY(!trySeqlockReadSection(state, slots, cap, stride))) {
        asm_volatile_pause();
        continue;
      }
      uint64_t offset;
      // If there was an expansion after the corresponding push
      // ticket was issued, adjust accordingly
      maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride);
      if (UNLIKELY(ticket >= numPushes)) {
        return false;
      }
      if (UNLIKELY(
              !this->popTicket_.compare_exchange_strong(ticket, ticket + 1))) {
        continue;
      }
      // Validate that state is the same
      if (LIKELY(state == this->dstate_.load(std::memory_order_acquire))) {
        return true;
      }
      // Obtained ticket but info is out-of-date - Update info
      while (true) {
        state = this->dstate_.load(std::memory_order_acquire);
        if (trySeqlockReadSection(state, slots, cap, stride)) {
          break;
        }
        asm_volatile_pause();
      }
      maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride);
      return true;
    } while (true);
  }
  /// Enqueues an element with a specific ticket number
  template <typename ...Args>
  void enqueueWithTicket(const uint64_t ticket, Args&&... args) noexcept {
    Slot* slots;
    size_t cap;
    int stride;
    uint64_t state;
    uint64_t offset;
    while (UNLIKELY(!trySeqlockReadSection(state, slots, cap, stride))) {
      asm_volatile_pause();
    }
    // If there was an expansion after this ticket was issued, adjust
    // accordingly
    maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride);
    this->enqueueWithTicketBase(ticket-offset, slots, cap, stride,
                                std::forward<Args>(args)...);
  }
  uint64_t getOffset(const uint64_t state) const noexcept {
    return state >> kSeqlockBits;
  }

  int getNumClosed(const uint64_t state) const noexcept {
    return (state & ((1 << kSeqlockBits) - 1)) >> 1;
  }
  /// Try to expand the queue. Returns true if this expansion was
  /// successful or a concurrent expansion is in progress. Returns
  /// false if the queue has reached its maximum capacity or
  /// allocation has failed.
  bool tryExpand(const uint64_t state, const size_t cap) noexcept {
    if (LIKELY(cap == this->capacity_)) {
      return false;
    }
    return tryExpandWithSeqlock(state, cap);
  }
  bool tryExpandWithSeqlock(const uint64_t state, const size_t cap) noexcept {
    uint64_t oldval = state;
    assert((state & 1) == 0);
    if (!this->dstate_.compare_exchange_strong(oldval, state + 1)) {
      // Failed to acquire seqlock. Another thread acquired it.
      // Go back to the caller and get up-to-date info.
      return true;
    }
    // Write critical section
    assert(cap == this->dcapacity_.load());
    auto head = this->popTicket_.load(std::memory_order_acquire);
    auto avail = std::max(head, getOffset(state)) + cap;
    uint64_t newOffset = avail;
    size_t newCapacity = std::min(dmult_ * cap, this->capacity_);
    Slot* newSlots =
        new (std::nothrow) Slot[newCapacity + 2 * this->kSlotPadding];
    if (newSlots == nullptr) {
      // Expansion failed. Restore the seqlock
      this->dstate_.store(state);
      return false;
    }
    // Successful expansion
    // calculate the current ticket offset
    uint64_t offset = getOffset(state);
    // calculate index in list of closed slots arrays
    int index = getNumClosed(state);
    assert((index << 1) < (1 << kSeqlockBits));
    // fill the info for the closed slots array
    closed_[index].offset_ = offset;
    closed_[index].slots_ = this->dslots_.load();
    closed_[index].capacity_ = cap;
    closed_[index].stride_ = this->dstride_.load();
    // update the new slots array info
    this->dslots_.store(newSlots);
    this->dcapacity_.store(newCapacity);
    this->dstride_.store(this->computeStride(newCapacity));
    // Release the seqlock and record the new ticket offset
    this->dstate_.store((newOffset << kSeqlockBits) + (2 * (index + 1)));
    return true;
  }
  /// Seqlock read-only section
  bool trySeqlockReadSection(
      uint64_t& state, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    state = this->dstate_.load(std::memory_order_acquire);
    if (UNLIKELY(state & 1)) {
      // Locked.
      return false;
    }
    // Start read-only section.
    slots = this->dslots_.load(std::memory_order_relaxed);
    cap = this->dcapacity_.load(std::memory_order_relaxed);
    stride = this->dstride_.load(std::memory_order_relaxed);
    // End of read-only section. Validate seqlock.
    std::atomic_thread_fence(std::memory_order_acquire);
    return LIKELY(state == this->dstate_.load(std::memory_order_relaxed));
  }
  /// If there was an expansion after ticket was issued, update local variables
  /// of the lagging operation using the most recent closed array with
  /// offset <= ticket and return true. Otherwise, return false.
  bool maybeUpdateFromClosed(
      const uint64_t state,
      const uint64_t ticket,
      uint64_t& offset,
      Slot*& slots,
      size_t& cap,
      int& stride) noexcept {
    offset = getOffset(state);
    if (LIKELY(ticket >= offset)) {
      return false;
    }
    updateFromClosed(state, ticket, offset, slots, cap, stride);
    return true;
  }

  void updateFromClosed(
      const uint64_t state,
      const uint64_t ticket,
      uint64_t& offset,
      Slot*& slots,
      size_t& cap,
      int& stride) noexcept {
    for (int i = getNumClosed(state) - 1; i >= 0; --i) {
      offset = closed_[i].offset_;
      if (offset <= ticket) {
        slots = closed_[i].slots_;
        cap = closed_[i].capacity_;
        stride = closed_[i].stride_;
        return;
      }
    }
    // A closed array with offset <= ticket should have been found
    assert(false);
  }
};
namespace detail {

/// CRTP specialization of MPMCQueueBase
template <
    template <typename T, template <typename> class Atom, bool Dynamic>
    class Derived,
    typename T,
    template <typename> class Atom,
    bool Dynamic>
class MPMCQueueBase<Derived<T, Atom, Dynamic>> : boost::noncopyable {
// Note: Using CRTP static casts in several functions of this base
// template instead of making called functions virtual or duplicating
// the code of calling functions in the derived partially specialized
// template
  static_assert(std::is_nothrow_constructible<T,T&&>::value ||
                folly::IsRelocatable<T>::value,
      "T must be relocatable or have a noexcept move constructor");
 public:
  typedef T value_type;

  using Slot = detail::SingleElementQueue<T,Atom>;

  explicit MPMCQueueBase(size_t queueCapacity)
    : capacity_(queueCapacity)
    , pushTicket_(0)
    , popTicket_(0)
    , pushSpinCutoff_(0)
    , popSpinCutoff_(0)
  {
    if (queueCapacity == 0) {
      throw std::invalid_argument(
        "MPMCQueue with explicit capacity 0 is impossible"
        // Stride computation in derived classes would sigfpe if capacity is 0
      );
    }

    // ideally this would be a static assert, but g++ doesn't allow it
    assert(alignof(MPMCQueue<T, Atom>) >= CacheLocality::kFalseSharingRange);
    assert(
        static_cast<uint8_t*>(static_cast<void*>(&popTicket_)) -
            static_cast<uint8_t*>(static_cast<void*>(&pushTicket_)) >=
        CacheLocality::kFalseSharingRange);
  }
  /// A default-constructed queue is useful because a usable (non-zero
  /// capacity) queue can be moved onto it or swapped with it
  MPMCQueueBase() noexcept
    : capacity_(0)
    , slots_(nullptr)
    , stride_(0)
    , dstate_(0)
    , dcapacity_(0)
    , pushTicket_(0)
    , popTicket_(0)
    , pushSpinCutoff_(0)
    , popSpinCutoff_(0)
  {}

  /// IMPORTANT: The move constructor is here to make it easier to perform
  /// the initialization phase, it is not safe to use when there are any
  /// concurrent accesses (this is not checked).
  MPMCQueueBase(MPMCQueueBase<Derived<T,Atom,Dynamic>>&& rhs) noexcept
    : capacity_(rhs.capacity_)
    , slots_(rhs.slots_)
    , stride_(rhs.stride_)
    , dstate_(rhs.dstate_.load(std::memory_order_relaxed))
    , dcapacity_(rhs.dcapacity_.load(std::memory_order_relaxed))
    , pushTicket_(rhs.pushTicket_.load(std::memory_order_relaxed))
    , popTicket_(rhs.popTicket_.load(std::memory_order_relaxed))
    , pushSpinCutoff_(rhs.pushSpinCutoff_.load(std::memory_order_relaxed))
    , popSpinCutoff_(rhs.popSpinCutoff_.load(std::memory_order_relaxed))
  {
    // relaxed ops are okay for the previous reads, since rhs queue can't
    // be in concurrent use

    // zero out rhs
    rhs.capacity_ = 0;
    rhs.slots_ = nullptr;
    rhs.stride_ = 0;
    rhs.dstate_.store(0, std::memory_order_relaxed);
    rhs.dcapacity_.store(0, std::memory_order_relaxed);
    rhs.pushTicket_.store(0, std::memory_order_relaxed);
    rhs.popTicket_.store(0, std::memory_order_relaxed);
    rhs.pushSpinCutoff_.store(0, std::memory_order_relaxed);
    rhs.popSpinCutoff_.store(0, std::memory_order_relaxed);
  }
  /// IMPORTANT: The move operator is here to make it easier to perform
  /// the initialization phase, it is not safe to use when there are any
  /// concurrent accesses (this is not checked).
  MPMCQueueBase<Derived<T,Atom,Dynamic>> const& operator=
    (MPMCQueueBase<Derived<T,Atom,Dynamic>>&& rhs) {
    if (this != &rhs) {
      this->~MPMCQueueBase();
      new (this) MPMCQueueBase(std::move(rhs));
    }
    return *this;
  }
  /// MPMCQueue can only be safely destroyed when there are no
  /// pending enqueuers or dequeuers (this is not checked).
  ~MPMCQueueBase() {
    delete[] slots_;
  }
  /// Returns the number of writes (including threads that are blocked waiting
  /// to write) minus the number of reads (including threads that are blocked
  /// waiting to read). So effectively, it becomes:
  /// elements in queue + pending(calls to write) - pending(calls to read).
  /// If nothing is pending, then the method returns the actual number of
  /// elements in the queue.
  /// The returned value can be negative if there are no writers and the queue
  /// is empty, but there is one reader that is blocked waiting to read (in
  /// which case, the returned size will be -1).
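  /// For example (illustrative): on a queue of capacity 4 holding two
  /// elements, size() returns 2; on an empty queue with one blocked
  /// reader, it returns -1.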
  ssize_t size() const noexcept {
    // since both pushes and pops increase monotonically, we can get a
    // consistent snapshot either by bracketing a read of popTicket_ with
    // two reads of pushTicket_ that return the same value, or the other
    // way around. We maximize our chances by alternately attempting
    // both bracketings.
    uint64_t pushes = pushTicket_.load(std::memory_order_acquire); // A
    uint64_t pops = popTicket_.load(std::memory_order_acquire); // B
    while (true) {
      uint64_t nextPushes = pushTicket_.load(std::memory_order_acquire); // C
      if (pushes == nextPushes) {
        // pushTicket_ didn't change from A (or the previous C) to C,
        // so we can linearize at B (or D)
        return ssize_t(pushes - pops);
      }
      pushes = nextPushes;
      uint64_t nextPops = popTicket_.load(std::memory_order_acquire); // D
      if (pops == nextPops) {
        // popTicket_ didn't change from B (or the previous D), so we
        // can linearize at C
        return ssize_t(pushes - pops);
      }
      pops = nextPops;
    }
  }
  /// Returns true if there are no items available for dequeue
  bool isEmpty() const noexcept {
    return size() <= 0;
  }

  /// Returns true if there is currently no empty space to enqueue
  bool isFull() const noexcept {
    // careful with signed -> unsigned promotion, since size can be negative
    return size() >= static_cast<ssize_t>(capacity_);
  }
  /// Returns a guess at size() for contexts that don't need a precise
  /// value, such as stats. More specifically, it returns the number of writes
  /// minus the number of reads, but after reading the number of writes, more
  /// writers could have come before the number of reads was sampled,
  /// and this method doesn't protect against such a case.
  /// The returned value can be negative.
  ssize_t sizeGuess() const noexcept {
    return writeCount() - readCount();
  }
  /// Doesn't change
  size_t capacity() const noexcept {
    return capacity_;
  }

  /// Doesn't change for non-dynamic
  size_t allocatedCapacity() const noexcept {
    return capacity_;
  }

  /// Returns the total number of calls to blockingWrite or successful
  /// calls to write, including those blockingWrite calls that are
  /// currently blocking
  uint64_t writeCount() const noexcept {
    return pushTicket_.load(std::memory_order_acquire);
  }

  /// Returns the total number of calls to blockingRead or successful
  /// calls to read, including those blockingRead calls that are currently
  /// blocking
  uint64_t readCount() const noexcept {
    return popTicket_.load(std::memory_order_acquire);
  }
  /// Enqueues a T constructed from args, blocking until space is
  /// available. Note that this method signature allows enqueue via
  /// move, if args is a T rvalue, via copy, if args is a T lvalue, or
  /// via emplacement if args is an initializer list that can be passed
  /// to a T constructor.
  template <typename ...Args>
  void blockingWrite(Args&&... args) noexcept {
    enqueueWithTicketBase(pushTicket_++, slots_, capacity_, stride_,
                          std::forward<Args>(args)...);
  }
  /// If an item can be enqueued with no blocking, does so and returns
  /// true, otherwise returns false. This method is similar to
  /// writeIfNotFull, but if you don't have a specific need for that
  /// method you should use this one.
  ///
  /// One of the common usages of this method is to enqueue via the
  /// move constructor, something like q.write(std::move(x)). If write
  /// returns false because the queue is full then x has not actually been
  /// consumed, which looks strange. To understand why it is actually okay
  /// to use x afterward, remember that std::move is just a typecast that
  /// provides an rvalue reference that enables use of a move constructor
  /// or operator. std::move doesn't actually move anything. It could
  /// more accurately be called std::rvalue_cast or std::move_permission.
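  ///
  /// Sketch (illustrative, assuming an MPMCQueue<std::string> named q):
  ///
  ///   std::string s = "payload";
  ///   if (!q.write(std::move(s))) {
  ///     // queue was full: s was never moved from and is still usable
  ///   }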
  template <typename ...Args>
  bool write(Args&&... args) noexcept {
    uint64_t ticket;
    Slot* slots;
    size_t cap;
    int stride;
    if (static_cast<Derived<T,Atom,Dynamic>*>(this)->
        tryObtainReadyPushTicket(ticket, slots, cap, stride)) {
      // we have pre-validated that the ticket won't block
      enqueueWithTicketBase(ticket, slots, cap, stride,
                            std::forward<Args>(args)...);
      return true;
    } else {
      return false;
    }
  }
  template <class Clock, typename... Args>
  bool tryWriteUntil(const std::chrono::time_point<Clock>& when,
                     Args&&... args) noexcept {
    uint64_t ticket;
    Slot* slots;
    size_t cap;
    int stride;
    if (tryObtainPromisedPushTicketUntil(ticket, slots, cap, stride, when)) {
      // we have pre-validated that the ticket won't block, or rather that
      // it won't block longer than it takes another thread to dequeue an
      // element from the slot it identifies.
      enqueueWithTicketBase(ticket, slots, cap, stride,
                            std::forward<Args>(args)...);
      return true;
    } else {
      return false;
    }
  }
  /// If the queue is not full, enqueues and returns true, otherwise
  /// returns false. Unlike write this method can be blocked by another
  /// thread, specifically a read that has linearized (been assigned
  /// a ticket) but not yet completed. If you don't really need this
  /// function you should probably use write.
  ///
  /// MPMCQueue isn't lock-free, so just because a read operation has
  /// linearized (and isFull is false) doesn't mean that space has been
  /// made available for another write. In this situation write will
  /// return false, but writeIfNotFull will wait for the dequeue to finish.
  /// This method is required if you are composing queues and managing
  /// your own wakeup, because it guarantees that after every successful
  /// write a readIfNotEmpty will succeed.
  template <typename ...Args>
  bool writeIfNotFull(Args&&... args) noexcept {
    uint64_t ticket;
    Slot* slots;
    size_t cap;
    int stride;
    if (static_cast<Derived<T,Atom,Dynamic>*>(this)->
        tryObtainPromisedPushTicket(ticket, slots, cap, stride)) {
      // some other thread is already dequeuing the slot into which we
      // are going to enqueue, but we might have to wait for them to finish
      enqueueWithTicketBase(ticket, slots, cap, stride,
                            std::forward<Args>(args)...);
      return true;
    } else {
      return false;
    }
  }
  /// Moves a dequeued element onto elem, blocking until an element
  /// is available
  void blockingRead(T& elem) noexcept {
    uint64_t ticket;
    static_cast<Derived<T,Atom,Dynamic>*>(this)->
        blockingReadWithTicket(ticket, elem);
  }

  /// Same as blockingRead() but also records the ticket number
  void blockingReadWithTicket(uint64_t& ticket, T& elem) noexcept {
    assert(capacity_ != 0);
    ticket = popTicket_++;
    dequeueWithTicketBase(ticket, slots_, capacity_, stride_, elem);
  }
  /// If an item can be dequeued with no blocking, does so and returns
  /// true, otherwise returns false.
  bool read(T& elem) noexcept {
    uint64_t ticket;
    return readAndGetTicket(ticket, elem);
  }

  /// Same as read() but also records the ticket number
  bool readAndGetTicket(uint64_t& ticket, T& elem) noexcept {
    Slot* slots;
    size_t cap;
    int stride;
    if (static_cast<Derived<T,Atom,Dynamic>*>(this)->
        tryObtainReadyPopTicket(ticket, slots, cap, stride)) {
      // the ticket has been pre-validated to not block
      dequeueWithTicketBase(ticket, slots, cap, stride, elem);
      return true;
    } else {
      return false;
    }
  }
  template <class Clock, typename... Args>
  bool tryReadUntil(
      const std::chrono::time_point<Clock>& when,
      T& elem) noexcept {
    uint64_t ticket;
    Slot* slots;
    size_t cap;
    int stride;
    if (tryObtainPromisedPopTicketUntil(ticket, slots, cap, stride, when)) {
      // we have pre-validated that the ticket won't block, or rather that
      // it won't block longer than it takes another thread to enqueue an
      // element on the slot it identifies.
      dequeueWithTicketBase(ticket, slots, cap, stride, elem);
      return true;
    } else {
      return false;
    }
  }
  /// If the queue is not empty, dequeues and returns true, otherwise
  /// returns false. If the matching write is still in progress then this
  /// method may block waiting for it. If you don't rely on being able
  /// to dequeue (such as by counting completed writes) then you should
  /// probably use read.
  bool readIfNotEmpty(T& elem) noexcept {
    uint64_t ticket;
    Slot* slots;
    size_t cap;
    int stride;
    if (static_cast<Derived<T,Atom,Dynamic>*>(this)->
        tryObtainPromisedPopTicket(ticket, slots, cap, stride)) {
      // the matching enqueue already has a ticket, but might not be done
      dequeueWithTicketBase(ticket, slots, cap, stride, elem);
      return true;
    } else {
      return false;
    }
  }
 protected:
  enum {
    /// Once every kAdaptationFreq we will spin longer, to try to estimate
    /// the proper spin backoff
    kAdaptationFreq = 128,

    /// To avoid false sharing in slots_ with neighboring memory
    /// allocations, we pad it with this many SingleElementQueue-s at
    /// each end
    kSlotPadding = (CacheLocality::kFalseSharingRange - 1) / sizeof(Slot) + 1
  };
  /// The maximum number of items in the queue at once
  size_t FOLLY_ALIGN_TO_AVOID_FALSE_SHARING capacity_;

  /// Anonymous union for use when Dynamic = false and true, respectively
  union {
    /// An array of capacity_ SingleElementQueue-s, each of which holds
    /// either 0 or 1 item. We over-allocate by 2 * kSlotPadding and don't
    /// touch the slots at either end, to avoid false sharing
    Slot* slots_;

    /// Current dynamic slots array of dcapacity_ SingleElementQueue-s
    Atom<Slot*> dslots_;
  };

  /// Anonymous union for use when Dynamic = false and true, respectively
  union {
    /// The number of slots_ indices that we advance for each ticket, to
    /// avoid false sharing. Ideally slots_[i] and slots_[i + stride_]
    /// aren't on the same cache line
    int stride_;

    /// Current stride
    Atom<int> dstride_;
  };

  /// The following two members are used by dynamic MPMCQueue.
  /// Ideally they should be in MPMCQueue<T,Atom,true>, but we get
  /// better cache locality if they are in the same cache line as
  /// dslots_ and dstride_.
  ///
  /// Dynamic state. A packed seqlock and ticket offset
  Atom<uint64_t> dstate_;
  /// Dynamic capacity
  Atom<size_t> dcapacity_;

  /// Enqueuers get tickets from here
  Atom<uint64_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushTicket_;

  /// Dequeuers get tickets from here
  Atom<uint64_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING popTicket_;

  /// This is how many times we will spin before using FUTEX_WAIT when
  /// the queue is full on enqueue, adaptively computed by occasionally
  /// spinning for longer and smoothing with an exponential moving average
  Atom<uint32_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushSpinCutoff_;

  /// The adaptive spin cutoff when the queue is empty on dequeue
  Atom<uint32_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING popSpinCutoff_;

  /// Alignment doesn't prevent false sharing at the end of the struct,
  /// so fill out the last cache line
  char padding_[CacheLocality::kFalseSharingRange - sizeof(Atom<uint32_t>)];
  /// We assign tickets in increasing order, but we don't want to
  /// access neighboring elements of slots_ because that will lead to
  /// false sharing (multiple cores accessing the same cache line even
  /// though they aren't accessing the same bytes in that cache line).
  /// To avoid this we advance by stride slots per ticket.
  ///
  /// We need gcd(capacity, stride) to be 1 so that we will use all
  /// of the slots. We ensure this by only considering prime strides,
  /// which either have no common divisors with capacity or else have
  /// a zero remainder after dividing by capacity. That is sufficient
  /// to guarantee correctness, but we also want to actually spread the
  /// accesses away from each other to avoid false sharing (consider a
  /// stride of 7 with a capacity of 8). To that end we try a few,
  /// taking care to observe that advancing by -1 is as bad as advancing
  /// by 1 when it comes to false sharing.
  ///
  /// The simple way to avoid false sharing would be to pad each
  /// SingleElementQueue, but since we have capacity_ of them that could
  /// waste a lot of space.
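  ///
  /// Worked example (illustrative): for capacity 10, strides 2 and 5 are
  /// rejected because they divide 10; stride 3 and stride 7 both give a
  /// separation of 3 (7 % 10 = 7, and min(7, 10 - 7) = 3), while stride
  /// 11 gives only 1, so the first stride with the best separation, 3,
  /// wins.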
  static int computeStride(size_t capacity) noexcept {
    static const int smallPrimes[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23 };

    int bestStride = 1;
    size_t bestSep = 1;
    for (int stride : smallPrimes) {
      if ((stride % capacity) == 0 || (capacity % stride) == 0) {
        continue;
      }
      size_t sep = stride % capacity;
      sep = std::min(sep, capacity - sep);
      if (sep > bestSep) {
        bestStride = stride;
        bestSep = sep;
      }
    }
    return bestStride;
  }
  /// Returns the index into slots_ that should be used when enqueuing or
  /// dequeuing with the specified ticket
  size_t idx(uint64_t ticket, size_t cap, int stride) noexcept {
    return ((ticket * stride) % cap) + kSlotPadding;
  }

  /// Maps an enqueue or dequeue ticket to the turn that should be used at
  /// the corresponding SingleElementQueue
  uint32_t turn(uint64_t ticket, size_t cap) noexcept {
    assert(cap != 0);
    return uint32_t(ticket / cap);
  }
  /// Tries to obtain a push ticket for which SingleElementQueue::enqueue
  /// won't block. Returns true on immediate success, false on immediate
  /// failure.
  bool tryObtainReadyPushTicket(
      uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    ticket = pushTicket_.load(std::memory_order_acquire); // A
    slots = slots_;
    cap = capacity_;
    stride = stride_;
    while (true) {
      if (!slots[idx(ticket, cap, stride)]
              .mayEnqueue(turn(ticket, cap))) {
        // if we call enqueue(ticket, ...) on the SingleElementQueue
        // right now it would block, but this might no longer be the next
        // ticket. We can increase the chance of tryEnqueue success under
        // contention (without blocking) by rechecking the ticket dispenser
        auto prev = ticket;
        ticket = pushTicket_.load(std::memory_order_acquire); // B
        if (prev == ticket) {
          // mayEnqueue was bracketed by two reads (A or prev B or prev
          // failing CAS to B), so we are definitely unable to enqueue
          return false;
        }
      } else {
        // we will bracket the mayEnqueue check with a read (A or prev B
        // or prev failing CAS) and the following CAS. If the CAS fails
        // it will effect a load of pushTicket_
        if (pushTicket_.compare_exchange_strong(ticket, ticket + 1)) {
          return true;
        }
      }
    }
  }
  /// Tries until when to obtain a push ticket for which
  /// SingleElementQueue::enqueue won't block. Returns true on success, false
  /// on failure.
  /// ticket is filled on success AND failure.
  template <class Clock>
  bool tryObtainPromisedPushTicketUntil(
      uint64_t& ticket, Slot*& slots, size_t& cap, int& stride,
      const std::chrono::time_point<Clock>& when
  ) noexcept {
    bool deadlineReached = false;
    while (!deadlineReached) {
      if (static_cast<Derived<T,Atom,Dynamic>*>(this)->
          tryObtainPromisedPushTicket(ticket, slots, cap, stride)) {
        return true;
      }
      // ticket is a blocking ticket until the preceding ticket has been
      // processed: wait until this ticket's turn arrives. We have not reserved
      // this ticket so we will have to re-attempt to get a non-blocking ticket
      // if we wake up before we time-out.
      deadlineReached = !slots[idx(ticket, cap, stride)]
          .tryWaitForEnqueueTurnUntil(turn(ticket, cap), pushSpinCutoff_,
                                      (ticket % kAdaptationFreq) == 0, when);
    }
    return false;
  }
  /// Tries to obtain a push ticket which can be satisfied if all
  /// in-progress pops complete. This function does not block, but
  /// blocking may be required when using the returned ticket if some
  /// other thread's pop is still in progress (ticket has been granted but
  /// pop has not yet completed).
  bool tryObtainPromisedPushTicket(
      uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    auto numPushes = pushTicket_.load(std::memory_order_acquire); // A
    slots = slots_;
    cap = capacity_;
    stride = stride_;
    while (true) {
      ticket = numPushes;
      const auto numPops = popTicket_.load(std::memory_order_acquire); // B
      // n will be negative if pops are pending
      const int64_t n = int64_t(numPushes - numPops);
      if (n >= static_cast<ssize_t>(capacity_)) {
        // Full, linearize at B. We don't need to recheck the read we
        // performed at A, because if numPushes was stale at B then the
        // real numPushes value is even worse
        return false;
      }
      if (pushTicket_.compare_exchange_strong(numPushes, numPushes + 1)) {
        return true;
      }
    }
  }
  /// Tries to obtain a pop ticket for which SingleElementQueue::dequeue
  /// won't block. Returns true on immediate success, false on immediate
  /// failure.
  bool tryObtainReadyPopTicket(
      uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    ticket = popTicket_.load(std::memory_order_acquire);
    slots = slots_;
    cap = capacity_;
    stride = stride_;
    while (true) {
      if (!slots[idx(ticket, cap, stride)]
              .mayDequeue(turn(ticket, cap))) {
        auto prev = ticket;
        ticket = popTicket_.load(std::memory_order_acquire);
        if (prev == ticket) {
          return false;
        }
      } else {
        if (popTicket_.compare_exchange_strong(ticket, ticket + 1)) {
          return true;
        }
      }
    }
  }
  /// Tries until when to obtain a pop ticket for which
  /// SingleElementQueue::dequeue won't block. Returns true on success, false
  /// on failure.
  /// ticket is filled on success AND failure.
  template <class Clock>
  bool tryObtainPromisedPopTicketUntil(
      uint64_t& ticket,
      Slot*& slots,
      size_t& cap,
      int& stride,
      const std::chrono::time_point<Clock>& when) noexcept {
    bool deadlineReached = false;
    while (!deadlineReached) {
      if (static_cast<Derived<T, Atom, Dynamic>*>(this)
              ->tryObtainPromisedPopTicket(ticket, slots, cap, stride)) {
        return true;
      }
      // ticket is a blocking ticket until the preceding ticket has been
      // processed: wait until this ticket's turn arrives. We have not reserved
      // this ticket so we will have to re-attempt to get a non-blocking ticket
      // if we wake up before we time-out.
      deadlineReached =
          !slots[idx(ticket, cap, stride)].tryWaitForDequeueTurnUntil(
              turn(ticket, cap),
              popSpinCutoff_,
              (ticket % kAdaptationFreq) == 0,
              when);
    }
    return false;
  }
  /// Similar to tryObtainReadyPopTicket, but returns a pop ticket whose
  /// corresponding push ticket has already been handed out, rather than
  /// returning one whose corresponding push ticket has already been
  /// completed. This means that there is a possibility that the caller
  /// will block when using the ticket, but it allows the user to rely on
  /// the fact that if enqueue has succeeded, tryObtainPromisedPopTicket
  /// will return true. The "try" part of this is that we won't have
  /// to block waiting for someone to call enqueue, although we might
  /// have to block waiting for them to finish executing code inside the
  /// MPMCQueue itself.
  bool tryObtainPromisedPopTicket(
      uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    auto numPops = popTicket_.load(std::memory_order_acquire); // A
    slots = slots_;
    cap = capacity_;
    stride = stride_;
    while (true) {
      ticket = numPops;
      const auto numPushes = pushTicket_.load(std::memory_order_acquire); // B
      if (numPops >= numPushes) {
        // Empty, or empty with pending pops. Linearize at B. We don't
        // need to recheck the read we performed at A, because if numPops
        // is stale then the fresh value is larger and the >= is still true
        return false;
      }
      if (popTicket_.compare_exchange_strong(numPops, numPops + 1)) {
        return true;
      }
    }
  }
  // Given a ticket, constructs an enqueued item using args
  template <typename ...Args>
  void enqueueWithTicketBase(
      uint64_t ticket, Slot* slots, size_t cap, int stride, Args&&... args
  ) noexcept {
    slots[idx(ticket, cap, stride)]
        .enqueue(turn(ticket, cap),
                 pushSpinCutoff_,
                 (ticket % kAdaptationFreq) == 0,
                 std::forward<Args>(args)...);
  }

  // To support tracking ticket numbers in MPMCPipelineStageImpl
  template <typename ...Args>
  void enqueueWithTicket(uint64_t ticket, Args&&... args) noexcept {
    enqueueWithTicketBase(ticket, slots_, capacity_, stride_,
                          std::forward<Args>(args)...);
  }

  // Given a ticket, dequeues the corresponding element
  void dequeueWithTicketBase(
      uint64_t ticket, Slot* slots, size_t cap, int stride, T& elem
  ) noexcept {
    assert(cap != 0);
    slots[idx(ticket, cap, stride)]
        .dequeue(turn(ticket, cap),
                 popSpinCutoff_,
                 (ticket % kAdaptationFreq) == 0,
                 elem);
  }
};
/// SingleElementQueue implements a blocking queue that holds at most one
/// item, and that requires its users to assign incrementing identifiers
/// (turns) to each enqueue and dequeue operation. Note that the turns
/// used by SingleElementQueue are doubled inside the TurnSequencer
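/// (for example, illustratively: the i-th enqueue on a given slot runs at
/// sequencer turn 2*i and the i-th dequeue at turn 2*i + 1, so pushes and
/// pops on a single slot alternate strictly).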
template <typename T, template <typename> class Atom>
struct SingleElementQueue {

  ~SingleElementQueue() noexcept {
    if ((sequencer_.uncompletedTurnLSB() & 1) == 1) {
      // we are pending a dequeue, so we have a constructed item
      destroyContents();
    }
  }
  /// enqueue using in-place noexcept construction
  template <typename ...Args,
            typename = typename std::enable_if<
                std::is_nothrow_constructible<T, Args...>::value>::type>
  void enqueue(const uint32_t turn,
               Atom<uint32_t>& spinCutoff,
               const bool updateSpinCutoff,
               Args&&... args) noexcept {
    sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff);
    new (&contents_) T(std::forward<Args>(args)...);
    sequencer_.completeTurn(turn * 2);
  }
  /// enqueue using move construction, either real (if
  /// is_nothrow_move_constructible) or simulated using relocation and
  /// default construction (if IsRelocatable and is_nothrow_constructible)
  template <typename = typename std::enable_if<
                (folly::IsRelocatable<T>::value &&
                 std::is_nothrow_constructible<T>::value) ||
                std::is_nothrow_constructible<T, T&&>::value>::type>
  void enqueue(
      const uint32_t turn,
      Atom<uint32_t>& spinCutoff,
      const bool updateSpinCutoff,
      T&& goner) noexcept {
    enqueueImpl(
        turn,
        spinCutoff,
        updateSpinCutoff,
        std::move(goner),
        typename std::conditional<std::is_nothrow_constructible<T,T&&>::value,
                                  ImplByMove, ImplByRelocation>::type());
  }
  /// Waits until either:
  /// 1: the dequeue turn preceding the given enqueue turn has arrived
  /// 2: the given deadline has arrived
  /// Case 1 returns true, case 2 returns false.
  template <class Clock>
  bool tryWaitForEnqueueTurnUntil(
      const uint32_t turn,
      Atom<uint32_t>& spinCutoff,
      const bool updateSpinCutoff,
      const std::chrono::time_point<Clock>& when) noexcept {
    return sequencer_.tryWaitForTurn(
               turn * 2, spinCutoff, updateSpinCutoff, &when) !=
        TurnSequencer<Atom>::TryWaitResult::TIMEDOUT;
  }

  bool mayEnqueue(const uint32_t turn) const noexcept {
    return sequencer_.isTurn(turn * 2);
  }
  void dequeue(uint32_t turn,
               Atom<uint32_t>& spinCutoff,
               const bool updateSpinCutoff,
               T& elem) noexcept {
    dequeueImpl(turn,
                spinCutoff,
                updateSpinCutoff,
                elem,
                typename std::conditional<folly::IsRelocatable<T>::value,
                                          ImplByRelocation,
                                          ImplByMove>::type());
  }
  /// Waits until either:
  /// 1: the enqueue turn preceding the given dequeue turn has arrived
  /// 2: the given deadline has arrived
  /// Case 1 returns true, case 2 returns false.
  template <class Clock>
  bool tryWaitForDequeueTurnUntil(
      const uint32_t turn,
      Atom<uint32_t>& spinCutoff,
      const bool updateSpinCutoff,
      const std::chrono::time_point<Clock>& when) noexcept {
    return sequencer_.tryWaitForTurn(
               turn * 2 + 1, spinCutoff, updateSpinCutoff, &when) !=
        TurnSequencer<Atom>::TryWaitResult::TIMEDOUT;
  }

  bool mayDequeue(const uint32_t turn) const noexcept {
    return sequencer_.isTurn(turn * 2 + 1);
  }
 private:
  /// Storage for a T constructed with placement new
  typename std::aligned_storage<sizeof(T),alignof(T)>::type contents_;

  /// Even turns are pushes, odd turns are pops
  TurnSequencer<Atom> sequencer_;

  T* ptr() noexcept {
    return static_cast<T*>(static_cast<void*>(&contents_));
  }

  void destroyContents() noexcept {
    try {
      ptr()->~T();
    } catch (...) {
      // g++ doesn't seem to have std::is_nothrow_destructible yet
    }
#ifndef NDEBUG
    memset(&contents_, 'Q', sizeof(T));
#endif
  }
  /// Tag classes for dispatching to enqueue/dequeue implementation.
  struct ImplByRelocation {};
  struct ImplByMove {};

  /// enqueue using nothrow move construction.
  void enqueueImpl(const uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T&& goner,
                   ImplByMove) noexcept {
    sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff);
    new (&contents_) T(std::move(goner));
    sequencer_.completeTurn(turn * 2);
  }

  /// enqueue by simulating a nothrow move via relocation (memcpy), followed
  /// by default construction of the source so it remains destructible.
  void enqueueImpl(const uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T&& goner,
                   ImplByRelocation) noexcept {
    sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff);
    memcpy(&contents_, &goner, sizeof(T));
    sequencer_.completeTurn(turn * 2);
    new (&goner) T();
  }
  /// dequeue by destructing followed by relocation. This version is preferred,
  /// because as much work as possible can be done before waiting.
  void dequeueImpl(uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T& elem,
                   ImplByRelocation) noexcept {
    try {
      elem.~T();
    } catch (...) {
      // unlikely, but if we don't complete our turn the queue will die
    }
    sequencer_.waitForTurn(turn * 2 + 1, spinCutoff, updateSpinCutoff);
    memcpy(&elem, &contents_, sizeof(T));
    sequencer_.completeTurn(turn * 2 + 1);
  }

  /// dequeue by nothrow move assignment.
  void dequeueImpl(uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T& elem,
                   ImplByMove) noexcept {
    sequencer_.waitForTurn(turn * 2 + 1, spinCutoff, updateSpinCutoff);
    elem = std::move(*ptr());
    destroyContents();
    sequencer_.completeTurn(turn * 2 + 1);
  }
};
} // namespace detail

} // namespace folly