/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <boost/noncopyable.hpp>

#include <algorithm>
#include <atomic>
#include <cassert>
#include <chrono>
#include <cstring>
#include <limits>
#include <new>
#include <stdexcept>
#include <type_traits>

#include <folly/Traits.h>
#include <folly/detail/CacheLocality.h>
#include <folly/detail/TurnSequencer.h>
#include <folly/portability/Unistd.h>
namespace folly {

namespace detail {

template <typename T, template <typename> class Atom>
struct SingleElementQueue;

template <typename T> class MPMCPipelineStageImpl;

/// MPMCQueue base CRTP template
template <typename> class MPMCQueueBase;

} // namespace detail
/// MPMCQueue<T> is a high-performance bounded concurrent queue that
/// supports multiple producers, multiple consumers, and optional blocking.
/// The queue has a fixed capacity, for which all memory will be allocated
/// up front. The bulk of the work of enqueuing and dequeuing can be
/// performed in parallel.
///
/// MPMCQueue is linearizable. That means that if a call to write(A)
/// returns before a call to write(B) begins, then A will definitely end up
/// in the queue before B, and if a call to read(X) returns before a call
/// to read(Y) is started, then X will be something from earlier in the
/// queue than Y. This also means that if a read call returns a value, you
/// can be sure that all previous elements of the queue have been assigned
/// a reader (that reader might not yet have returned, but it exists).
///
/// The underlying implementation uses a ticket dispenser for the head and
/// the tail, spreading accesses across N single-element queues to produce
/// a queue with capacity N. The ticket dispensers use atomic increment,
/// which is more robust to contention than a CAS loop. Each of the
/// single-element queues uses its own CAS to serialize access, with an
/// adaptive spin cutoff. When spinning fails on a single-element queue
/// it uses futex()'s _BITSET operations to reduce unnecessary wakeups
/// even if multiple waiters are present on an individual queue (such as
/// when the MPMCQueue's capacity is smaller than the number of enqueuers
/// or dequeuers).
///
/// In benchmarks (contained in tao/queues/ConcurrentQueueTests)
/// it handles 1 to 1, 1 to N, N to 1, and N to M thread counts better
/// than any of the alternatives present in fbcode, for both small (~10)
/// and large capacities. In these benchmarks it is also faster than
/// tbb::concurrent_bounded_queue for all configurations. When there are
/// many more threads than cores, MPMCQueue is _much_ faster than the tbb
/// queue because it uses futex() to block and unblock waiting threads,
/// rather than spinning with sched_yield.
///
/// NOEXCEPT INTERACTION: tl;dr: if it compiles you're fine. Ticket-based
/// queues separate the assignment of queue positions from the actual
/// construction of the in-queue elements, which means that the T
/// constructor used during enqueue must not throw an exception. This is
/// enforced at compile time using type traits, which requires that T be
/// adorned with accurate noexcept information. If your type does not
/// use noexcept, you will have to wrap it in something that provides
/// the guarantee. We provide an alternate safe implementation for types
/// that don't use noexcept but that are marked folly::IsRelocatable
/// and boost::has_nothrow_constructor, which is common for folly types.
/// In particular, if you can declare FOLLY_ASSUME_FBVECTOR_COMPATIBLE
/// then your type can be put in MPMCQueue.
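///
/// For illustration only, a sketch of adapting a type that lacks noexcept
/// annotations (Widget is a made-up example, not part of folly):
///
///   struct Widget {
///     std::unique_ptr<int> payload;   // relocatable member in practice
///     Widget() = default;             // nothrow default construction
///   };
///   // declare relocatability so the fallback implementation applies
///   FOLLY_ASSUME_FBVECTOR_COMPATIBLE(Widget)
///
///   folly::MPMCQueue<Widget> q(64);   // now compiles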
///
/// If you have a pool of N queue consumers that you want to shut down
/// after the queue has drained, one way is to enqueue N sentinel values
/// to the queue. If the producer doesn't know how many consumers there
/// are you can enqueue one sentinel and then have each consumer requeue
/// two sentinels after it receives it (by requeuing 2 the shutdown can
/// complete in O(log P) time instead of O(P)).
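///
/// A sketch of that pattern (illustrative; process() stands in for the
/// real per-item work, and -1 is the sentinel in this sketch):
///
///   void consumerLoop(folly::MPMCQueue<int>& q) {
///     int v;
///     while (true) {
///       q.blockingRead(v);
///       if (v == -1) {
///         q.blockingWrite(-1);   // propagate shutdown to up to two
///         q.blockingWrite(-1);   // more consumers, then exit
///         return;
///       }
///       process(v);
///     }
///   }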
template <typename T, template <typename> class Atom = std::atomic,
          bool Dynamic = false>
class MPMCQueue : public detail::MPMCQueueBase<MPMCQueue<T,Atom,Dynamic>> {
  friend class detail::MPMCPipelineStageImpl<T>;
  using Slot = detail::SingleElementQueue<T,Atom>;

 public:
  explicit MPMCQueue(size_t queueCapacity)
    : detail::MPMCQueueBase<MPMCQueue<T,Atom,Dynamic>>(queueCapacity)
  {
    this->stride_ = this->computeStride(queueCapacity);
    this->slots_ = new Slot[queueCapacity + 2 * this->kSlotPadding];
  }

  MPMCQueue() noexcept { }
};
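
// Quick usage sketch (illustrative, not part of the API): a producer and
// a consumer thread sharing a bounded queue of ints.
//
//   folly::MPMCQueue<int> q(4);
//   std::thread producer([&] {
//     for (int i = 0; i < 100; ++i) {
//       q.blockingWrite(i);        // blocks while the queue is full
//     }
//   });
//   std::thread consumer([&] {
//     int v;
//     for (int i = 0; i < 100; ++i) {
//       q.blockingRead(v);         // blocks while the queue is empty
//     }
//   });
//   producer.join();
//   consumer.join();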
/// The dynamic version of MPMCQueue allows dynamic expansion of queue
/// capacity, such that a queue may start with a smaller capacity than
/// specified and expand only if needed. Users may optionally specify
/// the initial capacity and the expansion multiplier.
///
/// The design uses a seqlock to enforce mutual exclusion among
/// expansion attempts. Regular operations read up-to-date queue
/// information (slots array, capacity, stride) inside read-only
/// seqlock sections, which are unimpeded when no expansion is in
/// progress.
///
/// An expansion computes a new capacity, allocates a new slots array,
/// and updates stride. No information needs to be copied from the
/// current slots array to the new one. When this happens, new slots
/// will not have sequence numbers that match ticket numbers. The
/// expansion needs to compute a ticket offset such that operations
/// that use the new array can adjust their calculations of slot indexes
/// and sequence numbers to account for the new slots starting with
/// sequence numbers of zero. The current ticket offset is packed with
/// the seqlock in an atomic 64-bit integer. The initial offset is zero.
///
/// Lagging write and read operations with tickets lower than the
/// ticket offset of the current slots array (i.e., the minimum ticket
/// number that can be served by the current array) must use earlier
/// closed arrays instead of the current one. Information about closed
/// slots arrays (array address, capacity, stride, and offset) is
/// maintained in a logarithmic-sized structure. Each entry in that
/// structure never needs to be changed once set. The number of closed
/// arrays is half the value of the seqlock (when unlocked).
///
/// The acquisition of the seqlock to perform an expansion does not
/// prevent the issuing of new push and pop tickets concurrently. The
/// expansion must set the new ticket offset to a value that couldn't
/// have been issued to an operation that has already gone through a
/// seqlock read-only section (and hence obtained information for
/// older closed arrays).
///
/// Note that the total queue capacity can temporarily exceed the
/// specified capacity when there are lagging consumers that haven't
/// yet consumed all the elements in closed arrays. Users should not
/// rely on the capacity of dynamic queues for synchronization, e.g.,
/// they should not expect that a thread will definitely block on a
/// call to blockingWrite() when the queue size is known to be equal
/// to its specified capacity.
///
/// The dynamic version is a partial specialization of MPMCQueue with
/// Dynamic == true.
template <typename T, template <typename> class Atom>
class MPMCQueue<T,Atom,true> :
      public detail::MPMCQueueBase<MPMCQueue<T,Atom,true>> {
  friend class detail::MPMCQueueBase<MPMCQueue<T,Atom,true>>;
  using Slot = detail::SingleElementQueue<T,Atom>;

  struct ClosedArray {
    uint64_t offset_ {0};
    Slot* slots_ {nullptr};
    size_t capacity_ {0};
    int stride_ {0};
  };

 public:
  explicit MPMCQueue(size_t queueCapacity)
    : detail::MPMCQueueBase<MPMCQueue<T,Atom,true>>(queueCapacity)
  {
    size_t cap = std::min<size_t>(kDefaultMinDynamicCapacity, queueCapacity);
    initQueue(cap, kDefaultExpansionMultiplier);
  }

  explicit MPMCQueue(size_t queueCapacity,
                     size_t minCapacity,
                     size_t expansionMultiplier)
    : detail::MPMCQueueBase<MPMCQueue<T,Atom,true>>(queueCapacity)
  {
    minCapacity = std::max<size_t>(1, minCapacity);
    size_t cap = std::min<size_t>(minCapacity, queueCapacity);
    expansionMultiplier = std::max<size_t>(2, expansionMultiplier);
    initQueue(cap, expansionMultiplier);
  }

  MPMCQueue() noexcept {
    dmult_ = 0;
    closed_ = nullptr;
  }
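
  // Illustrative construction sketch (not part of this header): a dynamic
  // queue that may eventually hold 1000 elements but starts small.
  //
  //   folly::MPMCQueue<int, std::atomic, true> q(1000);
  //   // or with an explicit initial capacity (100) and multiplier (5):
  //   folly::MPMCQueue<int, std::atomic, true> q2(1000, 100, 5);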
  MPMCQueue(MPMCQueue<T,Atom,true>&& rhs) noexcept {
    this->capacity_ = rhs.capacity_;
    this->slots_ = rhs.slots_;
    this->stride_ = rhs.stride_;
    this->dstate_.store(rhs.dstate_.load(std::memory_order_relaxed),
                        std::memory_order_relaxed);
    this->dcapacity_.store(rhs.dcapacity_.load(std::memory_order_relaxed),
                           std::memory_order_relaxed);
    this->pushTicket_.store(rhs.pushTicket_.load(std::memory_order_relaxed),
                            std::memory_order_relaxed);
    this->popTicket_.store(rhs.popTicket_.load(std::memory_order_relaxed),
                           std::memory_order_relaxed);
    this->pushSpinCutoff_.store(
        rhs.pushSpinCutoff_.load(std::memory_order_relaxed),
        std::memory_order_relaxed);
    this->popSpinCutoff_.store(
        rhs.popSpinCutoff_.load(std::memory_order_relaxed),
        std::memory_order_relaxed);
    dmult_ = rhs.dmult_;
    closed_ = rhs.closed_;

    rhs.capacity_ = 0;
    rhs.slots_ = nullptr;
    rhs.stride_ = 0;
    rhs.dstate_.store(0, std::memory_order_relaxed);
    rhs.dcapacity_.store(0, std::memory_order_relaxed);
    rhs.pushTicket_.store(0, std::memory_order_relaxed);
    rhs.popTicket_.store(0, std::memory_order_relaxed);
    rhs.pushSpinCutoff_.store(0, std::memory_order_relaxed);
    rhs.popSpinCutoff_.store(0, std::memory_order_relaxed);
    rhs.dmult_ = 0;
    rhs.closed_ = nullptr;
  }
  MPMCQueue<T,Atom,true> const& operator= (MPMCQueue<T,Atom,true>&& rhs) {
    if (this != &rhs) {
      this->~MPMCQueue();
      new (this) MPMCQueue(std::move(rhs));
    }
    return *this;
  }

  ~MPMCQueue() {
    if (closed_ != nullptr) {
      for (int i = getNumClosed(this->dstate_.load()) - 1; i >= 0; --i) {
        delete[] closed_[i].slots_;
      }
      delete[] closed_;
    }
  }
  size_t allocatedCapacity() const noexcept {
    return this->dcapacity_.load(std::memory_order_relaxed);
  }
  template <typename ...Args>
  void blockingWrite(Args&&... args) noexcept {
    uint64_t ticket = this->pushTicket_++;
    Slot* slots;
    size_t cap;
    int stride;
    uint64_t state;
    uint64_t offset;
    do {
      if (!trySeqlockReadSection(state, slots, cap, stride)) {
        continue;
      }
      offset = getOffset(state);
      if (ticket < offset) {
        // There was an expansion after this ticket was issued.
        updateFromClosed(state, ticket, offset, slots, cap, stride);
        break;
      }
      if (slots[this->idx((ticket-offset), cap, stride)]
          .mayEnqueue(this->turn(ticket-offset, cap))) {
        // A slot is ready. No need to expand.
        break;
      } else if (this->popTicket_.load(std::memory_order_relaxed) + cap
                 > ticket) {
        // May block, but a pop is in progress. No need to expand.
        // Get seqlock read section info again in case an expansion
        // occurred with an equal or higher ticket.
        continue;
      } else {
        // May block. See if we can expand.
        if (tryExpand(state, cap)) {
          // This or another thread started an expansion. Get updated info.
          continue;
        } else {
          // Can't expand. Block on the ticket we already hold.
          break;
        }
      }
    } while (true);
    this->enqueueWithTicketBase(ticket-offset, slots, cap, stride,
                                std::forward<Args>(args)...);
  }
  void blockingReadWithTicket(uint64_t& ticket, T& elem) noexcept {
    ticket = this->popTicket_++;
    Slot* slots;
    size_t cap;
    int stride;
    uint64_t state;
    uint64_t offset;
    while (!trySeqlockReadSection(state, slots, cap, stride)) {}
    offset = getOffset(state);
    if (ticket < offset) {
      // There was an expansion after the corresponding push ticket
      // was issued.
      updateFromClosed(state, ticket, offset, slots, cap, stride);
    }
    this->dequeueWithTicketBase(ticket-offset, slots, cap, stride, elem);
  }
 private:
  enum {
    kSeqlockBits = 6,
    kDefaultMinDynamicCapacity = 10,
    kDefaultExpansionMultiplier = 10,
  };

  size_t dmult_;

  // Info about closed slots arrays for use by lagging operations
  ClosedArray* closed_;
  void initQueue(const size_t cap, const size_t mult) {
    this->stride_ = this->computeStride(cap);
    this->slots_ = new Slot[cap + 2 * this->kSlotPadding];
    this->dstate_.store(0);
    this->dcapacity_.store(cap);
    dmult_ = mult;
    size_t maxClosed = 0;
    for (size_t expanded = cap;
         expanded < this->capacity_;
         expanded *= mult) {
      ++maxClosed;
    }
    closed_ = (maxClosed > 0) ? new ClosedArray[maxClosed] : nullptr;
  }
  bool tryObtainReadyPushTicket(
    uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    uint64_t state;
    do {
      ticket = this->pushTicket_.load(std::memory_order_acquire); // A
      if (!trySeqlockReadSection(state, slots, cap, stride)) {
        continue;
      }
      uint64_t offset = getOffset(state);
      if (ticket < offset) {
        // There was an expansion with offset greater than this ticket
        updateFromClosed(state, ticket, offset, slots, cap, stride);
      }
      if (slots[this->idx((ticket-offset), cap, stride)]
          .mayEnqueue(this->turn(ticket-offset, cap))) {
        // A slot is ready.
        if (this->pushTicket_.compare_exchange_strong(ticket, ticket + 1)) {
          // Adjust ticket
          ticket -= offset;
          return true;
        } else {
          continue;
        }
      } else {
        if (ticket != this->pushTicket_.load(std::memory_order_relaxed)) { // B
          // Try again. Ticket changed.
          continue;
        }
        // Likely to block.
        // Try to expand unless the ticket is for a closed array
        if (offset == getOffset(state)) {
          if (tryExpand(state, cap)) {
            // This or another thread started an expansion. Get up-to-date info.
            continue;
          }
        }
        return false;
      }
    } while (true);
  }
  bool tryObtainPromisedPushTicket(
    uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    uint64_t state;
    do {
      ticket = this->pushTicket_.load(std::memory_order_acquire);
      auto numPops = this->popTicket_.load(std::memory_order_acquire);
      if (!trySeqlockReadSection(state, slots, cap, stride)) {
        continue;
      }
      int64_t n = ticket - numPops;
      if (n >= static_cast<ssize_t>(this->capacity_)) {
        return false;
      }
      if (n >= static_cast<ssize_t>(cap)) {
        if (tryExpand(state, cap)) {
          // This or another thread started an expansion. Start over
          // with a new state.
          continue;
        } else {
          // Can't expand.
          return false;
        }
      }
      uint64_t offset = getOffset(state);
      if (ticket < offset) {
        // There was an expansion with offset greater than this ticket
        updateFromClosed(state, ticket, offset, slots, cap, stride);
      }
      if (this->pushTicket_.compare_exchange_strong(ticket, ticket + 1)) {
        // Adjust ticket
        ticket -= offset;
        return true;
      }
    } while (true);
  }
  bool tryObtainReadyPopTicket(
    uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    uint64_t state;
    do {
      ticket = this->popTicket_.load(std::memory_order_relaxed);
      if (!trySeqlockReadSection(state, slots, cap, stride)) {
        continue;
      }
      uint64_t offset = getOffset(state);
      if (ticket < offset) {
        // There was an expansion after the corresponding push ticket
        // was issued.
        updateFromClosed(state, ticket, offset, slots, cap, stride);
      }
      if (slots[this->idx((ticket-offset), cap, stride)]
          .mayDequeue(this->turn(ticket-offset, cap))) {
        if (this->popTicket_.compare_exchange_strong(ticket, ticket + 1)) {
          // Adjust ticket
          ticket -= offset;
          return true;
        }
      } else {
        return false;
      }
    } while (true);
  }
  bool tryObtainPromisedPopTicket(
    uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    uint64_t state;
    do {
      ticket = this->popTicket_.load(std::memory_order_acquire);
      auto numPushes = this->pushTicket_.load(std::memory_order_acquire);
      if (!trySeqlockReadSection(state, slots, cap, stride)) {
        continue;
      }
      if (ticket >= numPushes) {
        return false;
      }
      if (this->popTicket_.compare_exchange_strong(ticket, ticket + 1)) {
        // Adjust ticket
        uint64_t offset = getOffset(state);
        if (ticket < offset) {
          // There was an expansion after the corresponding push
          // ticket was issued.
          updateFromClosed(state, ticket, offset, slots, cap, stride);
        }
        ticket -= offset;
        return true;
      }
    } while (true);
  }
  /// Enqueues an element with a specific ticket number
  template <typename ...Args>
  void enqueueWithTicket(const uint64_t ticket, Args&&... args) noexcept {
    Slot* slots;
    size_t cap;
    int stride;
    uint64_t state;
    uint64_t offset;
    while (!trySeqlockReadSection(state, slots, cap, stride)) {}
    offset = getOffset(state);
    if (ticket < offset) {
      // There was an expansion after this ticket was issued.
      updateFromClosed(state, ticket, offset, slots, cap, stride);
    }
    this->enqueueWithTicketBase(ticket-offset, slots, cap, stride,
                                std::forward<Args>(args)...);
  }
  uint64_t getOffset(const uint64_t state) const noexcept {
    return state >> kSeqlockBits;
  }

  int getNumClosed(const uint64_t state) const noexcept {
    return (state & ((1 << kSeqlockBits) - 1)) >> 1;
  }
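
  // For orientation, the layout of dstate_ implied by the two accessors
  // above (a reading of the code, not an additional invariant):
  //
  //   bits [63 .. kSeqlockBits]    ticket offset of the current array
  //   bits [kSeqlockBits-1 .. 1]   number of closed arrays
  //   bit  0                       seqlock bit, set while expanding
  //
  // e.g. with kSeqlockBits == 6, state (1000 << 6) | (2 << 1) decodes
  // as offset 1000, two closed arrays, unlocked.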
  /// Try to expand the queue. Returns true if this expansion was
  /// successful or a concurrent expansion is in progress. Returns
  /// false if the queue has reached its maximum capacity or
  /// allocation has failed.
  bool tryExpand(const uint64_t state, const size_t cap) noexcept {
    if (cap == this->capacity_) {
      // Already fully expanded.
      return false;
    }
    // Try to acquire the seqlock
    uint64_t oldval = state;
    assert((state & 1) == 0);
    if (this->dstate_.compare_exchange_strong(oldval, state + 1)) {
      assert(cap == this->dcapacity_.load());
      uint64_t ticket = 1 + std::max(this->pushTicket_.load(),
                                     this->popTicket_.load());
      size_t newCapacity =
        std::min(dmult_ * cap, this->capacity_);
      Slot* newSlots =
        new (std::nothrow) Slot[newCapacity + 2 * this->kSlotPadding];
      if (newSlots == nullptr) {
        // Expansion failed. Restore the seqlock
        this->dstate_.store(state);
        return false;
      }
      // Successful expansion
      // calculate the current ticket offset
      uint64_t offset = getOffset(state);
      // calculate index in closed array
      int index = getNumClosed(state);
      assert((index << 1) < (1 << kSeqlockBits));
      // fill the info for the closed slots array
      closed_[index].offset_ = offset;
      closed_[index].slots_ = this->dslots_.load();
      closed_[index].capacity_ = cap;
      closed_[index].stride_ = this->dstride_.load();
      // update the new slots array info
      this->dslots_.store(newSlots);
      this->dcapacity_.store(newCapacity);
      this->dstride_.store(this->computeStride(newCapacity));
      // Release the seqlock and record the new ticket offset
      this->dstate_.store((ticket << kSeqlockBits) + (2 * (index + 1)));
      return true;
    } else { // failed to acquire seqlock
      // Someone acquired the seqlock. Go back to the caller and get
      // up-to-date info.
      return true;
    }
  }
  /// Seqlock read-only section
  bool trySeqlockReadSection(
    uint64_t& state, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    state = this->dstate_.load(std::memory_order_acquire);
    if (state & 1) {
      // Locked.
      return false;
    }
    // Start read-only section.
    slots = this->dslots_.load(std::memory_order_relaxed);
    cap = this->dcapacity_.load(std::memory_order_relaxed);
    stride = this->dstride_.load(std::memory_order_relaxed);
    // End of read-only section. Validate seqlock.
    std::atomic_thread_fence(std::memory_order_acquire);
    return (state == this->dstate_.load(std::memory_order_relaxed));
  }
  /// Update local variables of a lagging operation using the
  /// most recent closed array with offset <= ticket
  void updateFromClosed(
    const uint64_t state, const uint64_t ticket,
    uint64_t& offset, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    for (int i = getNumClosed(state) - 1; i >= 0; --i) {
      offset = closed_[i].offset_;
      if (offset <= ticket) {
        slots = closed_[i].slots_;
        cap = closed_[i].capacity_;
        stride = closed_[i].stride_;
        return;
      }
    }
    // A closed array with offset <= ticket should have been found
    assert(false);
  }
};

namespace detail {
/// CRTP specialization of MPMCQueueBase
template <
    template <typename T, template <typename> class Atom, bool Dynamic>
    class Derived,
    typename T, template <typename> class Atom, bool Dynamic>
class MPMCQueueBase<Derived<T, Atom, Dynamic>> : boost::noncopyable {
  // Note: Using CRTP static casts in several functions of this base
  // template instead of making called functions virtual or duplicating
  // the code of calling functions in the derived partially specialized
  // template

  static_assert(std::is_nothrow_constructible<T,T&&>::value ||
                folly::IsRelocatable<T>::value,
      "T must be relocatable or have a noexcept move constructor");
 public:
  typedef T value_type;

  using Slot = detail::SingleElementQueue<T,Atom>;
  explicit MPMCQueueBase(size_t queueCapacity)
    : capacity_(queueCapacity)
    , dstate_(0)
    , dcapacity_(0)
    , pushTicket_(0)
    , popTicket_(0)
    , pushSpinCutoff_(0)
    , popSpinCutoff_(0)
  {
    if (queueCapacity == 0) {
      throw std::invalid_argument(
        "MPMCQueue with explicit capacity 0 is impossible"
        // Stride computation in derived classes would SIGFPE if capacity is 0
      );
    }

    // ideally this would be a static assert, but g++ doesn't allow it
    assert(alignof(MPMCQueue<T,Atom>)
           >= detail::CacheLocality::kFalseSharingRange);
    assert(static_cast<uint8_t*>(static_cast<void*>(&popTicket_))
           - static_cast<uint8_t*>(static_cast<void*>(&pushTicket_))
           >= detail::CacheLocality::kFalseSharingRange);
  }
  /// A default-constructed queue is useful because a usable (non-zero
  /// capacity) queue can be moved onto it or swapped with it
  MPMCQueueBase() noexcept
    : capacity_(0)
    , slots_(nullptr)
    , stride_(0)
    , dstate_(0)
    , dcapacity_(0)
    , pushTicket_(0)
    , popTicket_(0)
    , pushSpinCutoff_(0)
    , popSpinCutoff_(0)
  {}
  /// IMPORTANT: The move constructor is here to make it easier to perform
  /// the initialization phase; it is not safe to use when there are any
  /// concurrent accesses (this is not checked).
  MPMCQueueBase(MPMCQueueBase<Derived<T,Atom,Dynamic>>&& rhs) noexcept
    : capacity_(rhs.capacity_)
    , slots_(rhs.slots_)
    , stride_(rhs.stride_)
    , dstate_(rhs.dstate_.load(std::memory_order_relaxed))
    , dcapacity_(rhs.dcapacity_.load(std::memory_order_relaxed))
    , pushTicket_(rhs.pushTicket_.load(std::memory_order_relaxed))
    , popTicket_(rhs.popTicket_.load(std::memory_order_relaxed))
    , pushSpinCutoff_(rhs.pushSpinCutoff_.load(std::memory_order_relaxed))
    , popSpinCutoff_(rhs.popSpinCutoff_.load(std::memory_order_relaxed))
  {
    // relaxed ops are okay for the previous reads, since rhs queue can't
    // be in concurrent use

    // zero out rhs
    rhs.capacity_ = 0;
    rhs.slots_ = nullptr;
    rhs.stride_ = 0;
    rhs.dstate_.store(0, std::memory_order_relaxed);
    rhs.dcapacity_.store(0, std::memory_order_relaxed);
    rhs.pushTicket_.store(0, std::memory_order_relaxed);
    rhs.popTicket_.store(0, std::memory_order_relaxed);
    rhs.pushSpinCutoff_.store(0, std::memory_order_relaxed);
    rhs.popSpinCutoff_.store(0, std::memory_order_relaxed);
  }
  /// IMPORTANT: The move operator is here to make it easier to perform
  /// the initialization phase; it is not safe to use when there are any
  /// concurrent accesses (this is not checked).
  MPMCQueueBase<Derived<T,Atom,Dynamic>> const& operator=
    (MPMCQueueBase<Derived<T,Atom,Dynamic>>&& rhs) {
    if (this != &rhs) {
      this->~MPMCQueueBase();
      new (this) MPMCQueueBase(std::move(rhs));
    }
    return *this;
  }

  /// MPMCQueue can only be safely destroyed when there are no
  /// pending enqueuers or dequeuers (this is not checked).
  ~MPMCQueueBase() {
    delete[] slots_;
  }
  /// Returns the number of successful writes minus the number of
  /// successful reads. Waiting blockingRead and blockingWrite calls are
  /// included, so this value can be negative.
  ssize_t size() const noexcept {
    // since both pushes and pops increase monotonically, we can get a
    // consistent snapshot either by bracketing a read of popTicket_ with
    // two reads of pushTicket_ that return the same value, or the other
    // way around. We maximize our chances by alternately attempting
    // both bracketings.
    uint64_t pushes = pushTicket_.load(std::memory_order_acquire); // A
    uint64_t pops = popTicket_.load(std::memory_order_acquire); // B
    while (true) {
      uint64_t nextPushes = pushTicket_.load(std::memory_order_acquire); // C
      if (pushes == nextPushes) {
        // pushTicket_ didn't change from A (or the previous C) to C,
        // so we can linearize at B (or D)
        return pushes - pops;
      }
      pushes = nextPushes;
      uint64_t nextPops = popTicket_.load(std::memory_order_acquire); // D
      if (pops == nextPops) {
        // popTicket_ didn't change from B (or the previous D) to D,
        // so we can linearize at C
        return pushes - pops;
      }
      pops = nextPops;
    }
  }
  /// Returns true if there are no items available for dequeue
  bool isEmpty() const noexcept {
    return size() <= 0;
  }

  /// Returns true if there is currently no empty space to enqueue
  bool isFull() const noexcept {
    // careful with signed -> unsigned promotion, since size can be negative
    return size() >= static_cast<ssize_t>(capacity_);
  }

  /// Returns a guess at size() for contexts that don't need a precise
  /// value, such as stats.
  ssize_t sizeGuess() const noexcept {
    return writeCount() - readCount();
  }

  /// Doesn't change
  size_t capacity() const noexcept {
    return capacity_;
  }

  /// Doesn't change for non-dynamic
  size_t allocatedCapacity() const noexcept {
    return capacity_;
  }
  /// Returns the total number of calls to blockingWrite or successful
  /// calls to write, including those blockingWrite calls that are
  /// currently blocking
  uint64_t writeCount() const noexcept {
    return pushTicket_.load(std::memory_order_acquire);
  }

  /// Returns the total number of calls to blockingRead or successful
  /// calls to read, including those blockingRead calls that are currently
  /// blocking
  uint64_t readCount() const noexcept {
    return popTicket_.load(std::memory_order_acquire);
  }
  /// Enqueues a T constructed from args, blocking until space is
  /// available. Note that this method signature allows enqueue via
  /// move, if args is a T rvalue, via copy, if args is a T lvalue, or
  /// via emplacement if args is an initializer list that can be passed
  /// to a T constructor.
  template <typename ...Args>
  void blockingWrite(Args&&... args) noexcept {
    enqueueWithTicketBase(pushTicket_++, slots_, capacity_, stride_,
                          std::forward<Args>(args)...);
  }
  /// If an item can be enqueued with no blocking, does so and returns
  /// true, otherwise returns false. This method is similar to
  /// writeIfNotFull, but if you don't have a specific need for that
  /// method you should use this one.
  ///
  /// One of the common usages of this method is to enqueue via the
  /// move constructor, something like q.write(std::move(x)). If write
  /// returns false because the queue is full then x has not actually been
  /// consumed, which looks strange. To understand why it is actually okay
  /// to use x afterward, remember that std::move is just a typecast that
  /// provides an rvalue reference that enables use of a move constructor
  /// or operator. std::move doesn't actually move anything. It could
  /// more accurately be called std::rvalue_cast or std::move_permission.
  template <typename ...Args>
  bool write(Args&&... args) noexcept {
    uint64_t ticket;
    Slot* slots;
    size_t cap;
    int stride;
    if (static_cast<Derived<T,Atom,Dynamic>*>(this)->
        tryObtainReadyPushTicket(ticket, slots, cap, stride)) {
      // we have pre-validated that the ticket won't block
      enqueueWithTicketBase(ticket, slots, cap, stride,
                            std::forward<Args>(args)...);
      return true;
    } else {
      return false;
    }
  }
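
  // A non-normative illustration of the std::move point made above:
  //
  //   std::string x = "payload";
  //   if (!q.write(std::move(x))) {
  //     // queue was full: no move constructor ran, so x still holds
  //     // "payload" and can be retried or handled some other way
  //   }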
  template <class Clock, typename... Args>
  bool tryWriteUntil(const std::chrono::time_point<Clock>& when,
                     Args&&... args) noexcept {
    uint64_t ticket;
    Slot* slots;
    size_t cap;
    int stride;
    if (tryObtainPromisedPushTicketUntil(ticket, slots, cap, stride, when)) {
      // we have pre-validated that the ticket won't block, or rather that
      // it won't block longer than it takes another thread to dequeue an
      // element from the slot it identifies.
      enqueueWithTicketBase(ticket, slots, cap, stride,
                            std::forward<Args>(args)...);
      return true;
    } else {
      return false;
    }
  }
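
  // Deadline-based usage, sketched (steady_clock is just one choice
  // of Clock):
  //
  //   using namespace std::chrono;
  //   int v = 42;
  //   if (!q.tryWriteUntil(steady_clock::now() + milliseconds(10), v)) {
  //     // timed out: the queue remained full for the whole window
  //   }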
  /// If the queue is not full, enqueues and returns true, otherwise
  /// returns false. Unlike write this method can be blocked by another
  /// thread, specifically a read that has linearized (been assigned
  /// a ticket) but not yet completed. If you don't really need this
  /// function you should probably use write.
  ///
  /// MPMCQueue isn't lock-free, so just because a read operation has
  /// linearized (and isFull is false) doesn't mean that space has been
  /// made available for another write. In this situation write will
  /// return false, but writeIfNotFull will wait for the dequeue to finish.
  /// This method is required if you are composing queues and managing
  /// your own wakeup, because it guarantees that after every successful
  /// write a readIfNotEmpty will succeed.
  template <typename ...Args>
  bool writeIfNotFull(Args&&... args) noexcept {
    uint64_t ticket;
    Slot* slots;
    size_t cap;
    int stride;
    if (static_cast<Derived<T,Atom,Dynamic>*>(this)->
        tryObtainPromisedPushTicket(ticket, slots, cap, stride)) {
      // some other thread is already dequeuing the slot into which we
      // are going to enqueue, but we might have to wait for them to finish
      enqueueWithTicketBase(ticket, slots, cap, stride,
                            std::forward<Args>(args)...);
      return true;
    } else {
      return false;
    }
  }
  /// Moves a dequeued element onto elem, blocking until an element
  /// is available
  void blockingRead(T& elem) noexcept {
    uint64_t ticket;
    static_cast<Derived<T,Atom,Dynamic>*>(this)->
      blockingReadWithTicket(ticket, elem);
  }

  /// Same as blockingRead() but also records the ticket number
  void blockingReadWithTicket(uint64_t& ticket, T& elem) noexcept {
    ticket = popTicket_++;
    dequeueWithTicketBase(ticket, slots_, capacity_, stride_, elem);
  }
  /// If an item can be dequeued with no blocking, does so and returns
  /// true, otherwise returns false.
  bool read(T& elem) noexcept {
    uint64_t ticket;
    return readAndGetTicket(ticket, elem);
  }

  /// Same as read() but also records the ticket number
  bool readAndGetTicket(uint64_t& ticket, T& elem) noexcept {
    Slot* slots;
    size_t cap;
    int stride;
    if (static_cast<Derived<T,Atom,Dynamic>*>(this)->
        tryObtainReadyPopTicket(ticket, slots, cap, stride)) {
      // the ticket has been pre-validated to not block
      dequeueWithTicketBase(ticket, slots, cap, stride, elem);
      return true;
    } else {
      return false;
    }
  }
  /// If the queue is not empty, dequeues and returns true, otherwise
  /// returns false. If the matching write is still in progress then this
  /// method may block waiting for it. If you don't rely on being able
  /// to dequeue (such as by counting completed writes) then you should
  /// use read.
  bool readIfNotEmpty(T& elem) noexcept {
    uint64_t ticket;
    Slot* slots;
    size_t cap;
    int stride;
    if (static_cast<Derived<T,Atom,Dynamic>*>(this)->
        tryObtainPromisedPopTicket(ticket, slots, cap, stride)) {
      // the matching enqueue already has a ticket, but might not be done
      dequeueWithTicketBase(ticket, slots, cap, stride, elem);
      return true;
    } else {
      return false;
    }
  }
 protected:
  enum {
    /// Once every kAdaptationFreq we will spin longer, to try to estimate
    /// the proper spin backoff
    kAdaptationFreq = 128,

    /// To avoid false sharing in slots_ with neighboring memory
    /// allocations, we pad it with this many SingleElementQueue-s at
    /// each end
    kSlotPadding = (detail::CacheLocality::kFalseSharingRange - 1)
        / sizeof(Slot) + 1
  };
  /// The maximum number of items in the queue at once
  size_t FOLLY_ALIGN_TO_AVOID_FALSE_SHARING capacity_;

  /// Anonymous union for use when Dynamic = false and true, respectively
  union {
    /// An array of capacity_ SingleElementQueue-s, each of which holds
    /// either 0 or 1 item. We over-allocate by 2 * kSlotPadding and don't
    /// touch the slots at either end, to avoid false sharing
    Slot* slots_;
    /// Current dynamic slots array of dcapacity_ SingleElementQueue-s
    Atom<Slot*> dslots_;
  };

  /// Anonymous union for use when Dynamic = false and true, respectively
  union {
    /// The number of slots_ indices that we advance for each ticket, to
    /// avoid false sharing. Ideally slots_[i] and slots_[i + stride_]
    /// aren't on the same cache line
    int stride_;
    Atom<int> dstride_;
  };

  /// The following two members are used by dynamic MPMCQueue.
  /// Ideally they should be in MPMCQueue<T,Atom,true>, but we get
  /// better cache locality if they are in the same cache line as
  /// dslots_ and dstride_.
  ///
  /// Dynamic state. A packed seqlock and ticket offset
  Atom<uint64_t> dstate_;
  /// Dynamic capacity
  Atom<size_t> dcapacity_;
  /// Enqueuers get tickets from here
  Atom<uint64_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushTicket_;

  /// Dequeuers get tickets from here
  Atom<uint64_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING popTicket_;

  /// This is how many times we will spin before using FUTEX_WAIT when
  /// the queue is full on enqueue, adaptively computed by occasionally
  /// spinning for longer and smoothing with an exponential moving average
  Atom<uint32_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushSpinCutoff_;

  /// The adaptive spin cutoff when the queue is empty on dequeue
  Atom<uint32_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING popSpinCutoff_;

  /// Alignment doesn't prevent false sharing at the end of the struct,
  /// so fill out the last cache line
  char padding_[detail::CacheLocality::kFalseSharingRange -
                sizeof(Atom<uint32_t>)];
  /// We assign tickets in increasing order, but we don't want to
  /// access neighboring elements of slots_ because that will lead to
  /// false sharing (multiple cores accessing the same cache line even
  /// though they aren't accessing the same bytes in that cache line).
  /// To avoid this we advance by stride slots per ticket.
  ///
  /// We need gcd(capacity, stride) to be 1 so that we will use all
  /// of the slots. We ensure this by only considering prime strides,
  /// which either have no common divisors with capacity or else have
  /// a zero remainder after dividing by capacity. That is sufficient
  /// to guarantee correctness, but we also want to actually spread the
  /// accesses away from each other to avoid false sharing (consider a
  /// stride of 7 with a capacity of 8). To that end we try a few,
  /// taking care to observe that advancing by -1 is as bad as advancing
  /// by 1 when it comes to false sharing.
  ///
  /// The simple way to avoid false sharing would be to pad each
  /// SingleElementQueue, but since we have capacity_ of them that could
  /// waste a lot of space.
  static int computeStride(size_t capacity) noexcept {
    static const int smallPrimes[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23 };

    int bestStride = 1;
    size_t bestSep = 1;
    for (int stride : smallPrimes) {
      if ((stride % capacity) == 0 || (capacity % stride) == 0) {
        continue;
      }
      size_t sep = stride % capacity;
      sep = std::min(sep, capacity - sep);
      if (sep > bestSep) {
        bestStride = stride;
        bestSep = sep;
      }
    }
    return bestStride;
  }
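
  // Worked example (derived from the loop above): for capacity 10,
  // strides 2 and 5 are rejected because 10 % 2 == 0 and 10 % 5 == 0;
  // stride 3 yields sep = min(3, 10 - 3) = 3, which no later prime
  // improves on, so 3 wins. Successive tickets then touch slot indices
  // 0, 3, 6, 9, 2, 5, 8, 1, 4, 7: every slot is used, and no two
  // consecutive tickets land on neighboring slots.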
  /// Returns the index into slots_ that should be used when enqueuing or
  /// dequeuing with the specified ticket
  size_t idx(uint64_t ticket, size_t cap, int stride) noexcept {
    return ((ticket * stride) % cap) + kSlotPadding;
  }

  /// Maps an enqueue or dequeue ticket to the turn that should be used at
  /// the corresponding SingleElementQueue
  uint32_t turn(uint64_t ticket, size_t cap) noexcept {
    return ticket / cap;
  }
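
  // e.g. (illustrative): with cap == 10 and stride == 3, ticket 23 maps
  // to slot (23 * 3) % 10 + kSlotPadding = 9 + kSlotPadding, and to turn
  // 23 / 10 == 2: it is the third operation (turns 0, 1, 2) of its kind
  // served by that slot.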
  /// Tries to obtain a push ticket for which SingleElementQueue::enqueue
  /// won't block. Returns true on immediate success, false on immediate
  /// failure.
  bool tryObtainReadyPushTicket(
    uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    ticket = pushTicket_.load(std::memory_order_acquire); // A
    slots = slots_;
    cap = capacity_;
    stride = stride_;
    while (true) {
      if (!slots[idx(ticket, cap, stride)]
          .mayEnqueue(turn(ticket, cap))) {
        // if we call enqueue(ticket, ...) on the SingleElementQueue
        // right now it would block, but this might no longer be the next
        // ticket. We can increase the chance of tryEnqueue success under
        // contention (without blocking) by rechecking the ticket dispenser
        auto prev = ticket;
        ticket = pushTicket_.load(std::memory_order_acquire); // B
        if (prev == ticket) {
          // mayEnqueue was bracketed by two reads (A or prev B or prev
          // failing CAS to B), so we are definitely unable to enqueue
          return false;
        }
      } else {
        // we will bracket the mayEnqueue check with a read (A or prev B
        // or prev failing CAS) and the following CAS. If the CAS fails
        // it will effect a load of pushTicket_
        if (pushTicket_.compare_exchange_strong(ticket, ticket + 1)) {
          return true;
        }
      }
    }
  }
  /// Tries until when to obtain a push ticket for which
  /// SingleElementQueue::enqueue won't block. Returns true on success,
  /// false on failure.
  /// ticket is filled on success AND failure.
  template <class Clock>
  bool tryObtainPromisedPushTicketUntil(
    uint64_t& ticket, Slot*& slots, size_t& cap, int& stride,
    const std::chrono::time_point<Clock>& when
  ) noexcept {
    bool deadlineReached = false;
    while (!deadlineReached) {
      if (static_cast<Derived<T,Atom,Dynamic>*>(this)->
          tryObtainPromisedPushTicket(ticket, slots, cap, stride)) {
        return true;
      }
      // ticket is a blocking ticket until the preceding ticket has been
      // processed: wait until this ticket's turn arrives. We have not reserved
      // this ticket so we will have to re-attempt to get a non-blocking ticket
      // if we wake up before we time-out.
      deadlineReached = !slots[idx(ticket, cap, stride)]
        .tryWaitForEnqueueTurnUntil(turn(ticket, cap), pushSpinCutoff_,
                                    (ticket % kAdaptationFreq) == 0, when);
    }
    return false;
  }
  /// Tries to obtain a push ticket which can be satisfied if all
  /// in-progress pops complete. This function does not block, but
  /// blocking may be required when using the returned ticket if some
  /// other thread's pop is still in progress (ticket has been granted but
  /// pop has not yet completed).
  bool tryObtainPromisedPushTicket(
    uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    auto numPushes = pushTicket_.load(std::memory_order_acquire); // A
    slots = slots_;
    cap = capacity_;
    stride = stride_;
    while (true) {
      auto numPops = popTicket_.load(std::memory_order_acquire); // B
      // n will be negative if pops are pending
      int64_t n = numPushes - numPops;
      if (n >= static_cast<ssize_t>(capacity_)) {
        // Full, linearize at B. We don't need to recheck the read we
        // performed at A, because if numPushes was stale at B then the
        // real numPushes value is even worse
        return false;
      }
      if (pushTicket_.compare_exchange_strong(numPushes, numPushes + 1)) {
        ticket = numPushes;
        return true;
      }
    }
  }
  /// Tries to obtain a pop ticket for which SingleElementQueue::dequeue
  /// won't block. Returns true on immediate success, false on immediate
  /// failure.
  bool tryObtainReadyPopTicket(
    uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    ticket = popTicket_.load(std::memory_order_acquire);
    slots = slots_;
    cap = capacity_;
    stride = stride_;
    while (true) {
      if (!slots[idx(ticket, cap, stride)]
          .mayDequeue(turn(ticket, cap))) {
        auto prev = ticket;
        ticket = popTicket_.load(std::memory_order_acquire);
        if (prev == ticket) {
          return false;
        }
      } else {
        if (popTicket_.compare_exchange_strong(ticket, ticket + 1)) {
          return true;
        }
      }
    }
  }
  /// Similar to tryObtainReadyPopTicket, but returns a pop ticket whose
  /// corresponding push ticket has already been handed out, rather than
  /// returning one whose corresponding push ticket has already been
  /// completed. This means that there is a possibility that the caller
  /// will block when using the ticket, but it allows the user to rely on
  /// the fact that if enqueue has succeeded, tryObtainPromisedPopTicket
  /// will return true. The "try" part of this is that we won't have
  /// to block waiting for someone to call enqueue, although we might
  /// have to block waiting for them to finish executing code inside the
  /// MPMCQueue itself.
  bool tryObtainPromisedPopTicket(
    uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
  ) noexcept {
    auto numPops = popTicket_.load(std::memory_order_acquire); // A
    while (true) {
      auto numPushes = pushTicket_.load(std::memory_order_acquire); // B
      if (numPops >= numPushes) {
        // Empty, or empty with pending pops. Linearize at B. We don't
        // need to recheck the read we performed at A, because if numPops
        // is stale then the fresh value is larger and the >= is still true
        return false;
      }
      if (popTicket_.compare_exchange_strong(numPops, numPops + 1)) {
        ticket = numPops;
        slots = slots_;
        cap = capacity_;
        stride = stride_;
        return true;
      }
    }
  }
  // Given a ticket, constructs an enqueued item using args
  template <typename ...Args>
  void enqueueWithTicketBase(
    uint64_t ticket, Slot* slots, size_t cap, int stride, Args&&... args
  ) noexcept {
    slots[idx(ticket, cap, stride)]
      .enqueue(turn(ticket, cap),
               pushSpinCutoff_,
               (ticket % kAdaptationFreq) == 0,
               std::forward<Args>(args)...);
  }

  // To support tracking ticket numbers in MPMCPipelineStageImpl
  template <typename ...Args>
  void enqueueWithTicket(uint64_t ticket, Args&&... args) noexcept {
    enqueueWithTicketBase(ticket, slots_, capacity_, stride_,
                          std::forward<Args>(args)...);
  }

  // Given a ticket, dequeues the corresponding element
  void dequeueWithTicketBase(
    uint64_t ticket, Slot* slots, size_t cap, int stride, T& elem
  ) noexcept {
    slots[idx(ticket, cap, stride)]
      .dequeue(turn(ticket, cap),
               popSpinCutoff_,
               (ticket % kAdaptationFreq) == 0,
               elem);
  }
};
/// SingleElementQueue implements a blocking queue that holds at most one
/// item, and that requires its users to assign incrementing identifiers
/// (turns) to each enqueue and dequeue operation. Note that the turns
/// used by SingleElementQueue are doubled inside the TurnSequencer
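/// (an enqueue with turn t waits for sequencer turn 2*t and the matching
/// dequeue waits for 2*t + 1, as mayEnqueue/mayDequeue below make explicit)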
template <typename T, template <typename> class Atom>
struct SingleElementQueue {

  ~SingleElementQueue() noexcept {
    if ((sequencer_.uncompletedTurnLSB() & 1) == 1) {
      // we are pending a dequeue, so we have a constructed item
      destroyContents();
    }
  }
  /// enqueue using in-place noexcept construction
  template <typename ...Args,
            typename = typename std::enable_if<
              std::is_nothrow_constructible<T,Args...>::value>::type>
  void enqueue(const uint32_t turn,
               Atom<uint32_t>& spinCutoff,
               const bool updateSpinCutoff,
               Args&&... args) noexcept {
    sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff);
    new (&contents_) T(std::forward<Args>(args)...);
    sequencer_.completeTurn(turn * 2);
  }
  /// enqueue using move construction, either real (if
  /// is_nothrow_move_constructible) or simulated using relocation and
  /// default construction (if IsRelocatable and has_nothrow_constructor)
  template <typename = typename std::enable_if<
                (folly::IsRelocatable<T>::value &&
                 boost::has_nothrow_constructor<T>::value) ||
                std::is_nothrow_constructible<T, T&&>::value>::type>
  void enqueue(const uint32_t turn,
               Atom<uint32_t>& spinCutoff,
               const bool updateSpinCutoff,
               T&& goner) noexcept {
    enqueueImpl(
        turn,
        spinCutoff,
        updateSpinCutoff,
        std::move(goner),
        typename std::conditional<std::is_nothrow_constructible<T,T&&>::value,
                                  ImplByMove, ImplByRelocation>::type());
  }
  /// Waits until either:
  ///   1: the dequeue turn preceding the given enqueue turn has arrived
  ///   2: the given deadline has arrived
  /// Case 1 returns true, case 2 returns false.
  template <class Clock>
  bool tryWaitForEnqueueTurnUntil(
      const uint32_t turn,
      Atom<uint32_t>& spinCutoff,
      const bool updateSpinCutoff,
      const std::chrono::time_point<Clock>& when) noexcept {
    return sequencer_.tryWaitForTurn(
        turn * 2, spinCutoff, updateSpinCutoff, &when);
  }

  bool mayEnqueue(const uint32_t turn) const noexcept {
    return sequencer_.isTurn(turn * 2);
  }
  void dequeue(uint32_t turn,
               Atom<uint32_t>& spinCutoff,
               const bool updateSpinCutoff,
               T& elem) noexcept {
    dequeueImpl(turn,
                spinCutoff,
                updateSpinCutoff,
                elem,
                typename std::conditional<folly::IsRelocatable<T>::value,
                                          ImplByRelocation,
                                          ImplByMove>::type());
  }

  bool mayDequeue(const uint32_t turn) const noexcept {
    return sequencer_.isTurn(turn * 2 + 1);
  }

 private:
  /// Storage for a T constructed with placement new
  typename std::aligned_storage<sizeof(T),alignof(T)>::type contents_;

  /// Even turns are pushes, odd turns are pops
  TurnSequencer<Atom> sequencer_;

  T* ptr() noexcept {
    return static_cast<T*>(static_cast<void*>(&contents_));
  }

  void destroyContents() noexcept {
    try {
      ptr()->~T();
    } catch (...) {
      // g++ doesn't seem to have std::is_nothrow_destructible yet
    }
#ifndef NDEBUG
    memset(&contents_, 'Q', sizeof(T));
#endif
  }
  /// Tag classes for dispatching to enqueue/dequeue implementation.
  struct ImplByRelocation {};
  struct ImplByMove {};
  /// enqueue using nothrow move construction.
  void enqueueImpl(const uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T&& goner,
                   ImplByMove) noexcept {
    sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff);
    new (&contents_) T(std::move(goner));
    sequencer_.completeTurn(turn * 2);
  }
  /// enqueue by simulating a nothrow move via relocation (memcpy),
  /// followed by default construction of the moved-from source so its
  /// destructor later runs on a valid object.
  void enqueueImpl(const uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T&& goner,
                   ImplByRelocation) noexcept {
    sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff);
    memcpy(&contents_, &goner, sizeof(T));
    sequencer_.completeTurn(turn * 2);
    new (&goner) T();
  }
  /// dequeue by destructing followed by relocation. This version is
  /// preferred, because as much work as possible can be done before
  /// waiting.
  void dequeueImpl(uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T& elem,
                   ImplByRelocation) noexcept {
    try {
      elem.~T();
    } catch (...) {
      // unlikely, but if we don't complete our turn the queue will die
    }
    sequencer_.waitForTurn(turn * 2 + 1, spinCutoff, updateSpinCutoff);
    memcpy(&elem, &contents_, sizeof(T));
    sequencer_.completeTurn(turn * 2 + 1);
  }

  /// dequeue by nothrow move assignment.
  void dequeueImpl(uint32_t turn,
                   Atom<uint32_t>& spinCutoff,
                   const bool updateSpinCutoff,
                   T& elem,
                   ImplByMove) noexcept {
    sequencer_.waitForTurn(turn * 2 + 1, spinCutoff, updateSpinCutoff);
    elem = std::move(*ptr());
    destroyContents();
    sequencer_.completeTurn(turn * 2 + 1);
  }
};
} // namespace detail

} // namespace folly