/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <boost/noncopyable.hpp>

#include <type_traits>

#include <folly/Traits.h>
#include <folly/detail/CacheLocality.h>
#include <folly/detail/TurnSequencer.h>
#include <folly/portability/Unistd.h>

namespace folly {

namespace detail {

template <typename T, template <typename> class Atom>
struct SingleElementQueue;

template <typename T> class MPMCPipelineStageImpl;

} // namespace detail

/// MPMCQueue<T> is a high-performance bounded concurrent queue that
/// supports multiple producers, multiple consumers, and optional blocking.
/// The queue has a fixed capacity, for which all memory will be allocated
/// up front. The bulk of the work of enqueuing and dequeuing can be
/// performed in parallel.
///
/// MPMCQueue is linearizable. That means that if a call to write(A)
/// returns before a call to write(B) begins, then A will definitely end up
/// in the queue before B, and if a call to read(X) returns before a call
/// to read(Y) is started, then X will be something from earlier in the
/// queue than Y. This also means that if a read call returns a value, you
/// can be sure that all previous elements of the queue have been assigned
/// a reader (that reader might not yet have returned, but it exists).
///
/// The underlying implementation uses a ticket dispenser for the head and
/// the tail, spreading accesses across N single-element queues to produce
/// a queue with capacity N. The ticket dispensers use atomic increment,
/// which is more robust to contention than a CAS loop. Each of the
/// single-element queues uses its own CAS to serialize access, with an
/// adaptive spin cutoff. When spinning fails on a single-element queue
/// it uses futex()'s _BITSET operations to reduce unnecessary wakeups
/// even if multiple waiters are present on an individual queue (such as
/// when the MPMCQueue's capacity is smaller than the number of enqueuers
/// or dequeuers).
///
/// In benchmarks (contained in tao/queues/ConcurrentQueueTests)
/// it handles 1 to 1, 1 to N, N to 1, and N to M thread counts better
/// than any of the alternatives present in fbcode, for both small (~10)
/// and large capacities. In these benchmarks it is also faster than
/// tbb::concurrent_bounded_queue for all configurations. When there are
/// many more threads than cores, MPMCQueue is _much_ faster than the tbb
/// queue because it uses futex() to block and unblock waiting threads,
/// rather than spinning with sched_yield.
///
/// NOEXCEPT INTERACTION: tl;dr: If it compiles you're fine. Ticket-based
/// queues separate the assignment of queue positions from the actual
/// construction of the in-queue elements, which means that the T
/// constructor used during enqueue must not throw an exception. This is
/// enforced at compile time using type traits, which requires that T be
/// adorned with accurate noexcept information. If your type does not
/// use noexcept, you will have to wrap it in something that provides
/// the guarantee. We provide an alternate safe implementation for types
/// that don't use noexcept but that are marked folly::IsRelocatable
/// and boost::has_nothrow_constructor, which is common for folly types.
/// In particular, if you can declare FOLLY_ASSUME_FBVECTOR_COMPATIBLE
/// then your type can be put in MPMCQueue.
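///
/// A minimal sketch of that escape hatch (Widget is a hypothetical
/// relocatable type whose move constructor is not marked noexcept):
///
///   struct Widget { Widget(); Widget(Widget&& other); /* not noexcept */ };
///   // declared at namespace scope, see folly/Traits.h:
///   FOLLY_ASSUME_FBVECTOR_COMPATIBLE(Widget);
///   folly::MPMCQueue<Widget> q(64);  // compiles, using the relocation-based
///                                    // enqueue/dequeue path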
///
/// If you have a pool of N queue consumers that you want to shut down
/// after the queue has drained, one way is to enqueue N sentinel values
/// to the queue. If the producer doesn't know how many consumers there
/// are you can enqueue one sentinel and then have each consumer requeue
/// two sentinels after it receives it (by requeuing 2 the shutdown can
/// complete in O(log P) time instead of O(P)).
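///
/// A minimal usage sketch (illustrative only; process() and the
/// single-sentinel shutdown convention are not part of this API, and the
/// "requeue one sentinel" variant shown is the simple O(P) one):
///
///   folly::MPMCQueue<int> q(100);      // capacity fixed at 100
///
///   // producer
///   for (int i = 0; i < 1000; ++i) {
///     q.blockingWrite(i);              // blocks while the queue is full
///   }
///   q.blockingWrite(-1);               // -1 serves as the shutdown sentinel
///
///   // each consumer
///   int item;
///   for (;;) {
///     q.blockingRead(item);            // blocks while the queue is empty
///     if (item == -1) {
///       q.blockingWrite(-1);           // pass the sentinel to the next consumer
///       break;
///     }
///     process(item);                   // hypothetical per-item work
///   }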
template <typename T, template <typename> class Atom = std::atomic>
class MPMCQueue : boost::noncopyable {

  static_assert(std::is_nothrow_constructible<T,T&&>::value ||
                folly::IsRelocatable<T>::value,
      "T must be relocatable or have a noexcept move constructor");

  friend class detail::MPMCPipelineStageImpl<T>;

  typedef T value_type;

  explicit MPMCQueue(size_t queueCapacity)
    : capacity_(queueCapacity)
    , pushTicket_(0)
    , popTicket_(0)
    , pushSpinCutoff_(0)
    , popSpinCutoff_(0)
  {
    if (queueCapacity == 0) {
      throw std::invalid_argument(
          "MPMCQueue with explicit capacity 0 is impossible");
    }

    // would sigfpe if capacity is 0
    stride_ = computeStride(queueCapacity);
    slots_ = new detail::SingleElementQueue<T,Atom>[queueCapacity +
                                                    2 * kSlotPadding];

    // ideally this would be a static assert, but g++ doesn't allow it
    assert(alignof(MPMCQueue<T,Atom>)
           >= detail::CacheLocality::kFalseSharingRange);
    assert(static_cast<uint8_t*>(static_cast<void*>(&popTicket_))
           - static_cast<uint8_t*>(static_cast<void*>(&pushTicket_))
           >= detail::CacheLocality::kFalseSharingRange);
  }

  /// A default-constructed queue is useful because a usable (non-zero
  /// capacity) queue can be moved onto it or swapped with it

  /// IMPORTANT: The move constructor is here to make it easier to perform
  /// the initialization phase; it is not safe to use when there are any
  /// concurrent accesses (this is not checked).
  MPMCQueue(MPMCQueue<T,Atom>&& rhs) noexcept
    : capacity_(rhs.capacity_)
    , slots_(rhs.slots_)
    , stride_(rhs.stride_)
    , pushTicket_(rhs.pushTicket_.load(std::memory_order_relaxed))
    , popTicket_(rhs.popTicket_.load(std::memory_order_relaxed))
    , pushSpinCutoff_(rhs.pushSpinCutoff_.load(std::memory_order_relaxed))
    , popSpinCutoff_(rhs.popSpinCutoff_.load(std::memory_order_relaxed))
  {
    // relaxed ops are okay for the previous reads, since rhs queue can't
    // be in concurrent use

    rhs.slots_ = nullptr;
    rhs.pushTicket_.store(0, std::memory_order_relaxed);
    rhs.popTicket_.store(0, std::memory_order_relaxed);
    rhs.pushSpinCutoff_.store(0, std::memory_order_relaxed);
    rhs.popSpinCutoff_.store(0, std::memory_order_relaxed);
  }

  /// IMPORTANT: The move operator is here to make it easier to perform
  /// the initialization phase; it is not safe to use when there are any
  /// concurrent accesses (this is not checked).
  MPMCQueue<T,Atom> const& operator= (MPMCQueue<T,Atom>&& rhs) {
    if (this != &rhs) {
      this->~MPMCQueue();
      new (this) MPMCQueue(std::move(rhs));
    }
    return *this;
  }

  /// MPMCQueue can only be safely destroyed when there are no
  /// pending enqueuers or dequeuers (this is not checked).
  ~MPMCQueue() {
    delete[] slots_;
  }

  /// Returns the number of successful writes minus the number of successful
  /// reads. Waiting blockingRead and blockingWrite calls are included,
  /// so this value can be negative.
  ssize_t size() const noexcept {
    // since both pushes and pops increase monotonically, we can get a
    // consistent snapshot either by bracketing a read of popTicket_ with
    // two reads of pushTicket_ that return the same value, or the other
    // way around. We maximize our chances by alternately attempting
    // both bracketings.
    uint64_t pushes = pushTicket_.load(std::memory_order_acquire); // A
    uint64_t pops = popTicket_.load(std::memory_order_acquire); // B
    while (true) {
      uint64_t nextPushes = pushTicket_.load(std::memory_order_acquire); // C
      if (pushes == nextPushes) {
        // pushTicket_ didn't change from A (or the previous C) to C,
        // so we can linearize at B (or D)
        return pushes - pops;
      }
      pushes = nextPushes;

      uint64_t nextPops = popTicket_.load(std::memory_order_acquire); // D
      if (pops == nextPops) {
        // popTicket_ didn't change from B (or the previous D), so we
        // can linearize at C
        return pushes - pops;
      }
      pops = nextPops;
    }
  }

  /// Returns true if there are no items available for dequeue
  bool isEmpty() const noexcept {
    return size() <= 0;
  }

  /// Returns true if there is currently no empty space to enqueue
  bool isFull() const noexcept {
    // careful with signed -> unsigned promotion, since size can be negative
    return size() >= static_cast<ssize_t>(capacity_);
  }

  /// Returns a guess at size() for contexts that don't need a precise
  /// value, such as stats.
  ssize_t sizeGuess() const noexcept {
    return writeCount() - readCount();
  }

  size_t capacity() const noexcept {
    return capacity_;
  }

  /// Returns the total number of calls to blockingWrite or successful
  /// calls to write, including those blockingWrite calls that are
  /// currently blocking
  uint64_t writeCount() const noexcept {
    return pushTicket_.load(std::memory_order_acquire);
  }

  /// Returns the total number of calls to blockingRead or successful
  /// calls to read, including those blockingRead calls that are currently
  /// blocking
  uint64_t readCount() const noexcept {
    return popTicket_.load(std::memory_order_acquire);
  }

  /// Enqueues a T constructed from args, blocking until space is
  /// available. Note that this method signature allows enqueue via
  /// move if args is a T rvalue, via copy if args is a T lvalue, or
  /// via emplacement if args is an initializer list that can be passed
  /// to a T constructor.
  template <typename ...Args>
  void blockingWrite(Args&&... args) noexcept {
    enqueueWithTicket(pushTicket_++, std::forward<Args>(args)...);
  }

  /// If an item can be enqueued with no blocking, does so and returns
  /// true, otherwise returns false. This method is similar to
  /// writeIfNotFull, but if you don't have a specific need for that
  /// method you should use this one.
  ///
  /// One of the common usages of this method is to enqueue via the
  /// move constructor, something like q.write(std::move(x)). If write
  /// returns false because the queue is full then x has not actually been
  /// consumed, which looks strange. To understand why it is actually okay
  /// to use x afterward, remember that std::move is just a typecast that
  /// provides an rvalue reference that enables use of a move constructor
  /// or operator. std::move doesn't actually move anything. It could
  /// more accurately be called std::rvalue_cast or std::move_permission.
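  ///
  /// A minimal sketch of that pattern (makeWork() is hypothetical and q is
  /// assumed to be an MPMCQueue<std::string>):
  ///
  ///   std::string x = makeWork();
  ///   if (!q.write(std::move(x))) {
  ///     // the queue was full: the move constructor never ran, so x still
  ///     // holds its original value and can be retried or dropped later
  ///   }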
  template <typename ...Args>
  bool write(Args&&... args) noexcept {
    uint64_t ticket;
    if (tryObtainReadyPushTicket(ticket)) {
      // we have pre-validated that the ticket won't block
      enqueueWithTicket(ticket, std::forward<Args>(args)...);
      return true;
    }
    return false;
  }

  template <class Clock, typename... Args>
  bool tryWriteUntil(const std::chrono::time_point<Clock>& when,
                     Args&&... args) noexcept {
    uint64_t ticket;
    if (tryObtainPromisedPushTicketUntil(ticket, when)) {
      // we have pre-validated that the ticket won't block, or rather that
      // it won't block longer than it takes another thread to dequeue an
      // element from the slot it identifies.
      enqueueWithTicket(ticket, std::forward<Args>(args)...);
      return true;
    }
    return false;
  }

  /// If the queue is not full, enqueues and returns true, otherwise
  /// returns false. Unlike write this method can be blocked by another
  /// thread, specifically a read that has linearized (been assigned
  /// a ticket) but not yet completed. If you don't really need this
  /// function you should probably use write.
  ///
  /// MPMCQueue isn't lock-free, so just because a read operation has
  /// linearized (and isFull is false) doesn't mean that space has been
  /// made available for another write. In this situation write will
  /// return false, but writeIfNotFull will wait for the dequeue to finish.
  /// This method is required if you are composing queues and managing
  /// your own wakeup, because it guarantees that after every successful
  /// write a readIfNotEmpty will succeed.
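  ///
  /// A sketch of that compose-your-own-wakeup pattern (illustrative only;
  /// sem is a hypothetical counting semaphore, not part of this class):
  ///
  ///   // producer side: count exactly the writes that succeeded
  ///   if (q.writeIfNotFull(std::move(item))) {
  ///     sem.post();
  ///   }
  ///
  ///   // consumer side: each post corresponds to a successful write, so
  ///   // the dequeue is guaranteed to succeed, although it may briefly
  ///   // block while that write finishes constructing the element
  ///   sem.wait();
  ///   T out;
  ///   bool ok = q.readIfNotEmpty(out);
  ///   assert(ok);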
  template <typename ...Args>
  bool writeIfNotFull(Args&&... args) noexcept {
    uint64_t ticket;
    if (tryObtainPromisedPushTicket(ticket)) {
      // some other thread is already dequeuing the slot into which we
      // are going to enqueue, but we might have to wait for them to finish
      enqueueWithTicket(ticket, std::forward<Args>(args)...);
      return true;
    }
    return false;
  }

  /// Moves a dequeued element onto elem, blocking until an element
  /// is available
  void blockingRead(T& elem) noexcept {
    dequeueWithTicket(popTicket_++, elem);
  }

  /// If an item can be dequeued with no blocking, does so and returns
  /// true, otherwise returns false.
  bool read(T& elem) noexcept {
    uint64_t ticket;
    if (tryObtainReadyPopTicket(ticket)) {
      // the ticket has been pre-validated to not block
      dequeueWithTicket(ticket, elem);
      return true;
    }
    return false;
  }

  /// If the queue is not empty, dequeues and returns true, otherwise
  /// returns false. If the matching write is still in progress then this
  /// method may block waiting for it. If you don't rely on being able
  /// to dequeue (such as by counting completed writes) then you should
  /// prefer read.
  bool readIfNotEmpty(T& elem) noexcept {
    uint64_t ticket;
    if (tryObtainPromisedPopTicket(ticket)) {
      // the matching enqueue already has a ticket, but might not be done
      dequeueWithTicket(ticket, elem);
      return true;
    }
    return false;
  }

  enum {
    /// Once every kAdaptationFreq we will spin longer, to try to estimate
    /// the proper spin backoff
    kAdaptationFreq = 128,

    /// To avoid false sharing in slots_ with neighboring memory
    /// allocations, we pad it with this many SingleElementQueue-s at
    /// each end
    kSlotPadding = (detail::CacheLocality::kFalseSharingRange - 1)
        / sizeof(detail::SingleElementQueue<T,Atom>) + 1
  };

  /// The maximum number of items in the queue at once
  size_t FOLLY_ALIGN_TO_AVOID_FALSE_SHARING capacity_;

  /// An array of capacity_ SingleElementQueue-s, each of which holds
  /// either 0 or 1 item. We over-allocate by 2 * kSlotPadding and don't
  /// touch the slots at either end, to avoid false sharing
  detail::SingleElementQueue<T,Atom>* slots_;

  /// The number of slots_ indices that we advance for each ticket, to
  /// avoid false sharing. Ideally slots_[i] and slots_[i + stride_]
  /// aren't on the same cache line
  int stride_;

  /// Enqueuers get tickets from here
  Atom<uint64_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushTicket_;

  /// Dequeuers get tickets from here
  Atom<uint64_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING popTicket_;

  /// This is how many times we will spin before using FUTEX_WAIT when
  /// the queue is full on enqueue, adaptively computed by occasionally
  /// spinning for longer and smoothing with an exponential moving average
  Atom<uint32_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushSpinCutoff_;

  /// The adaptive spin cutoff when the queue is empty on dequeue
  Atom<uint32_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING popSpinCutoff_;

  /// Alignment doesn't prevent false sharing at the end of the struct,
  /// so fill out the last cache line
  char padding_[detail::CacheLocality::kFalseSharingRange -
                sizeof(Atom<uint32_t>)];

  /// We assign tickets in increasing order, but we don't want to
  /// access neighboring elements of slots_ because that will lead to
  /// false sharing (multiple cores accessing the same cache line even
  /// though they aren't accessing the same bytes in that cache line).
  /// To avoid this we advance by stride slots per ticket.
  ///
  /// We need gcd(capacity, stride) to be 1 so that we will use all
  /// of the slots. We ensure this by only considering prime strides,
  /// which either have no common divisors with capacity or else have
  /// a zero remainder after dividing by capacity. That is sufficient
  /// to guarantee correctness, but we also want to actually spread the
  /// accesses away from each other to avoid false sharing (consider a
  /// stride of 7 with a capacity of 8). To that end we try a few, taking
  /// care to observe that advancing by -1 is as bad as advancing by 1
  /// when it comes to false sharing.
  ///
  /// The simple way to avoid false sharing would be to pad each
  /// SingleElementQueue, but since we have capacity_ of them that could
  /// waste a lot of space.
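  ///
  /// As an illustrative example (not tied to what this function actually
  /// picks for a given capacity): with a capacity of 10, a stride of 3
  /// would visit the slots in the order 0, 3, 6, 9, 2, 5, 8, 1, 4, 7.
  /// Every slot is used, since gcd(3, 10) == 1, and consecutive tickets
  /// land three slots apart, so they rarely share a cache line.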
  static int computeStride(size_t capacity) noexcept {
    static const int smallPrimes[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23 };

    for (int stride : smallPrimes) {
      if ((stride % capacity) == 0 || (capacity % stride) == 0) {
        continue;
      }
      size_t sep = stride % capacity;
      sep = std::min(sep, capacity - sep);

  /// Returns the index into slots_ that should be used when enqueuing or
  /// dequeuing with the specified ticket
  size_t idx(uint64_t ticket) noexcept {
    return ((ticket * stride_) % capacity_) + kSlotPadding;
  }

  /// Maps an enqueue or dequeue ticket to the turn that should be used at
  /// the corresponding SingleElementQueue
  uint32_t turn(uint64_t ticket) noexcept {
    return ticket / capacity_;
  }

  /// Tries to obtain a push ticket for which SingleElementQueue::enqueue
  /// won't block. Returns true on immediate success, false on immediate
  /// failure.
  bool tryObtainReadyPushTicket(uint64_t& rv) noexcept {
    auto ticket = pushTicket_.load(std::memory_order_acquire); // A
    while (true) {
      if (!slots_[idx(ticket)].mayEnqueue(turn(ticket))) {
        // if we call enqueue(ticket, ...) on the SingleElementQueue
        // right now it would block, but this might no longer be the next
        // ticket. We can increase the chance of tryEnqueue success under
        // contention (without blocking) by rechecking the ticket dispenser
        auto prev = ticket;
        ticket = pushTicket_.load(std::memory_order_acquire); // B
        if (prev == ticket) {
          // mayEnqueue was bracketed by two reads (A or prev B or prev
          // failing CAS to B), so we are definitely unable to enqueue
          rv = ticket;
          return false;
        }
      } else {
        // we will bracket the mayEnqueue check with a read (A or prev B
        // or prev failing CAS) and the following CAS. If the CAS fails
        // it will effect a load of pushTicket_
        if (pushTicket_.compare_exchange_strong(ticket, ticket + 1)) {
          rv = ticket;
          return true;
        }
      }
    }
  }

  /// Tries until when to obtain a push ticket for which
  /// SingleElementQueue::enqueue won't block. Returns true on success, false
  /// on timeout.
  /// ticket is filled on success AND failure.
  template <class Clock>
  bool tryObtainPromisedPushTicketUntil(
      uint64_t& ticket, const std::chrono::time_point<Clock>& when) noexcept {
    bool deadlineReached = false;
    while (!deadlineReached) {
      if (tryObtainPromisedPushTicket(ticket)) {
        return true;
      }
      // ticket is a blocking ticket until the preceding ticket has been
      // processed: wait until this ticket's turn arrives. We have not reserved
      // this ticket so we will have to re-attempt to get a non-blocking ticket
      // if we wake up before we time out.
      deadlineReached = !slots_[idx(ticket)].tryWaitForEnqueueTurnUntil(
          turn(ticket), pushSpinCutoff_, (ticket % kAdaptationFreq) == 0, when);
    }
    return false;
  }

  /// Tries to obtain a push ticket which can be satisfied if all
  /// in-progress pops complete. This function does not block, but
  /// blocking may be required when using the returned ticket if some
  /// other thread's pop is still in progress (ticket has been granted but
  /// pop has not yet completed).
  bool tryObtainPromisedPushTicket(uint64_t& rv) noexcept {
    auto numPushes = pushTicket_.load(std::memory_order_acquire); // A
    while (true) {
      auto numPops = popTicket_.load(std::memory_order_acquire); // B
      // n will be negative if pops are pending
      int64_t n = numPushes - numPops;
      if (n >= static_cast<ssize_t>(capacity_)) {
        // Full, linearize at B. We don't need to recheck the read we
        // performed at A, because if numPushes was stale at B then the
        // real numPushes value is even worse
        return false;
      }
      if (pushTicket_.compare_exchange_strong(numPushes, numPushes + 1)) {
        rv = numPushes;
        return true;
      }
    }
  }

  /// Tries to obtain a pop ticket for which SingleElementQueue::dequeue
  /// won't block. Returns true on immediate success, false on immediate
  /// failure.
  bool tryObtainReadyPopTicket(uint64_t& rv) noexcept {
    auto ticket = popTicket_.load(std::memory_order_acquire);
    while (true) {
      if (!slots_[idx(ticket)].mayDequeue(turn(ticket))) {
        auto prev = ticket;
        ticket = popTicket_.load(std::memory_order_acquire);
        if (prev == ticket) {
          rv = ticket;
          return false;
        }
      } else if (popTicket_.compare_exchange_strong(ticket, ticket + 1)) {
        rv = ticket;
        return true;
      }
    }
  }

  /// Similar to tryObtainReadyPopTicket, but returns a pop ticket whose
  /// corresponding push ticket has already been handed out, rather than
  /// returning one whose corresponding push ticket has already been
  /// completed. This means that there is a possibility that the caller
  /// will block when using the ticket, but it allows the user to rely on
  /// the fact that if enqueue has succeeded, tryObtainPromisedPopTicket
  /// will return true. The "try" part of this is that we won't have
  /// to block waiting for someone to call enqueue, although we might
  /// have to block waiting for them to finish executing code inside the
  /// MPMCQueue itself.
  bool tryObtainPromisedPopTicket(uint64_t& rv) noexcept {
    auto numPops = popTicket_.load(std::memory_order_acquire); // A
    while (true) {
      auto numPushes = pushTicket_.load(std::memory_order_acquire); // B
      if (numPops >= numPushes) {
        // Empty, or empty with pending pops. Linearize at B. We don't
        // need to recheck the read we performed at A, because if numPops
        // is stale then the fresh value is larger and the >= is still true
        return false;
      }
      if (popTicket_.compare_exchange_strong(numPops, numPops + 1)) {
        rv = numPops;
        return true;
      }
    }
  }

  // Given a ticket, constructs an enqueued item using args
  template <typename ...Args>
  void enqueueWithTicket(uint64_t ticket, Args&&... args) noexcept {
    slots_[idx(ticket)].enqueue(turn(ticket),
                                pushSpinCutoff_,
                                (ticket % kAdaptationFreq) == 0,
                                std::forward<Args>(args)...);
  }

  // Given a ticket, dequeues the corresponding element
  void dequeueWithTicket(uint64_t ticket, T& elem) noexcept {
    slots_[idx(ticket)].dequeue(turn(ticket),
                                popSpinCutoff_,
                                (ticket % kAdaptationFreq) == 0,
                                elem);
  }
};

namespace detail {

/// SingleElementQueue implements a blocking queue that holds at most one
/// item, and that requires its users to assign incrementing identifiers
/// (turns) to each enqueue and dequeue operation. Note that the turns
/// used by SingleElementQueue are doubled inside the TurnSequencer.
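///
/// For example (illustrative): with an MPMCQueue capacity of 4, ticket 6
/// maps to turn 6 / 4 == 1 of its slot; the enqueue for that ticket waits
/// for TurnSequencer turn 1 * 2 == 2 and the matching dequeue waits for
/// turn 1 * 2 + 1 == 3, so pushes and pops of a slot strictly alternate.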
template <typename T, template <typename> class Atom>
struct SingleElementQueue {
  ~SingleElementQueue() noexcept {
    if ((sequencer_.uncompletedTurnLSB() & 1) == 1) {
      // we are pending a dequeue, so we have a constructed item
      destroyContents();
    }
  }

  /// enqueue using in-place noexcept construction
  template <typename ...Args,
            typename = typename std::enable_if<
                std::is_nothrow_constructible<T,Args...>::value>::type>
  void enqueue(const uint32_t turn,
               Atom<uint32_t>& spinCutoff,
               const bool updateSpinCutoff,
               Args&&... args) noexcept {
    sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff);
    new (&contents_) T(std::forward<Args>(args)...);
    sequencer_.completeTurn(turn * 2);
  }

  /// enqueue using move construction, either real (if
  /// is_nothrow_move_constructible) or simulated using relocation and
  /// default construction (if IsRelocatable and has_nothrow_constructor)
  template <typename = typename std::enable_if<
                (folly::IsRelocatable<T>::value &&
                 boost::has_nothrow_constructor<T>::value) ||
                std::is_nothrow_constructible<T, T&&>::value>::type>
  void enqueue(const uint32_t turn,
               Atom<uint32_t>& spinCutoff,
               const bool updateSpinCutoff,
               T&& goner) noexcept {
    enqueueImpl(turn,
                spinCutoff,
                updateSpinCutoff,
                std::move(goner),
                typename std::conditional<std::is_nothrow_constructible<T,T&&>::value,
                                          ImplByMove, ImplByRelocation>::type());
  }

  /// Waits until either:
  ///   1: the dequeue turn preceding the given enqueue turn has arrived
  ///   2: the given deadline has arrived
  /// Case 1 returns true, case 2 returns false.
  template <class Clock>
  bool tryWaitForEnqueueTurnUntil(
      const uint32_t turn,
      Atom<uint32_t>& spinCutoff,
      const bool updateSpinCutoff,
      const std::chrono::time_point<Clock>& when) noexcept {
    return sequencer_.tryWaitForTurn(
        turn * 2, spinCutoff, updateSpinCutoff, &when);
  }

  bool mayEnqueue(const uint32_t turn) const noexcept {
    return sequencer_.isTurn(turn * 2);
  }

  void dequeue(uint32_t turn,
               Atom<uint32_t>& spinCutoff,
               const bool updateSpinCutoff,
               T& elem) noexcept {
    dequeueImpl(turn, spinCutoff, updateSpinCutoff, elem,
                typename std::conditional<folly::IsRelocatable<T>::value,
                                          ImplByRelocation,
                                          ImplByMove>::type());
  }

  bool mayDequeue(const uint32_t turn) const noexcept {
    return sequencer_.isTurn(turn * 2 + 1);
  }

  /// Storage for a T constructed with placement new
  typename std::aligned_storage<sizeof(T),alignof(T)>::type contents_;

  /// Even turns are pushes, odd turns are pops
  TurnSequencer<Atom> sequencer_;

  T* ptr() noexcept {
    return static_cast<T*>(static_cast<void*>(&contents_));
  }

  void destroyContents() noexcept {
    try {
      ptr()->~T();
    } catch (...) {
      // g++ doesn't seem to have std::is_nothrow_destructible yet
    }
#ifndef NDEBUG
    memset(&contents_, 'Q', sizeof(T));
#endif
  }

  /// Tag classes for dispatching to enqueue/dequeue implementation.
  struct ImplByRelocation {};
  struct ImplByMove {};

  /// enqueue using nothrow move construction.
  void enqueueImpl(const uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T&& goner,
                   ImplByMove) noexcept {
    sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff);
    new (&contents_) T(std::move(goner));
    sequencer_.completeTurn(turn * 2);
  }

  /// enqueue by simulating a nothrow move with relocation, followed by
  /// default construction of the moved-from object.
  void enqueueImpl(const uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T&& goner,
                   ImplByRelocation) noexcept {
    sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff);
    memcpy(&contents_, &goner, sizeof(T));
    sequencer_.completeTurn(turn * 2);
    new (&goner) T();
  }

  /// dequeue by destructing followed by relocation. This version is preferred,
  /// because as much work as possible can be done before waiting.
  void dequeueImpl(uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T& elem,
                   ImplByRelocation) noexcept {
    try {
      elem.~T();
    } catch (...) {
      // unlikely, but if we don't complete our turn the queue will die
    }
    sequencer_.waitForTurn(turn * 2 + 1, spinCutoff, updateSpinCutoff);
    memcpy(&elem, &contents_, sizeof(T));
    sequencer_.completeTurn(turn * 2 + 1);
  }

  /// dequeue by nothrow move assignment.
  void dequeueImpl(uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T& elem,
                   ImplByMove) noexcept {
    sequencer_.waitForTurn(turn * 2 + 1, spinCutoff, updateSpinCutoff);
    elem = std::move(*ptr());
    destroyContents();
    sequencer_.completeTurn(turn * 2 + 1);
  }
};

} // namespace detail

} // namespace folly