2 * Copyright 2016 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
21 #include <sys/types.h>
31 #include <folly/FileUtil.h>
32 #include <folly/io/async/EventBase.h>
33 #include <folly/io/async/EventHandler.h>
34 #include <folly/io/async/DelayedDestruction.h>
35 #include <folly/io/async/Request.h>
36 #include <folly/Likely.h>
37 #include <folly/ScopeGuard.h>
38 #include <folly/SpinLock.h>
40 #include <glog/logging.h>
42 #if __linux__ && !__ANDROID__
43 #define FOLLY_HAVE_EVENTFD
44 #include <folly/io/async/EventFDWrapper.h>
50 * A producer-consumer queue for passing messages between EventBase threads.
52 * Messages can be added to the queue from any thread. Multiple consumers may
53 * listen to the queue from multiple EventBase threads.
55 * A NotificationQueue may not be destroyed while there are still consumers
56 * registered to receive events from the queue. It is the user's
57 * responsibility to ensure that all consumers are unregistered before the
60 * MessageT should be MoveConstructible (i.e., must support either a move
61 * constructor or a copy constructor, or both). Ideally its move constructor
62 * (or copy constructor if no move constructor is provided) should never throw
63 * exceptions. If the constructor may throw, the consumers could end up
64 * spinning trying to move a message off the queue and failing, and then
67 template<typename MessageT>
68 class NotificationQueue {
// NOTE(review): this listing is elided -- many interior lines (closing
// braces, blank lines, access specifiers, some statements) are missing from
// the visible text. Comments below describe only what the visible lines show.
71 * A callback interface for consuming messages from the queue as they arrive.
73 class Consumer : public DelayedDestruction, private EventHandler {
// Default value for maxReadAtOnce_ (see setMaxReadAtOnce() below).
75 enum : uint16_t { kDefaultMaxReadAtOnce = 10 };
79 destroyedFlagPtr_(nullptr),
80 maxReadAtOnce_(kDefaultMaxReadAtOnce) {}
82 // create a consumer in-place, without the need to build a new class
83 template <typename TCallback>
84 static std::unique_ptr<Consumer, DelayedDestruction::Destructor> make(
85 TCallback&& callback);
88 * messageAvailable() will be invoked whenever a new
89 * message is available from the pipe.
91 virtual void messageAvailable(MessageT&& message) = 0;
94 * Begin consuming messages from the specified queue.
96 * messageAvailable() will be called whenever a message is available. This
97 * consumer will continue to consume messages until stopConsuming() is
100 * A Consumer may only consume messages from a single NotificationQueue at
101 * a time. startConsuming() should not be called if this consumer is
104 void startConsuming(EventBase* eventBase, NotificationQueue* queue) {
105 init(eventBase, queue);
106 registerHandler(READ | PERSIST);
110 * Same as above but registers this event handler as internal so that it
111 * doesn't count towards the pending reader count for the IOLoop.
113 void startConsumingInternal(
114 EventBase* eventBase, NotificationQueue* queue) {
115 init(eventBase, queue);
// Same init path as startConsuming(), but the handler is registered as
// internal so it does not keep the event loop alive on its own.
116 registerInternalHandler(READ | PERSIST);
120 * Stop consuming messages.
122 * startConsuming() may be called again to resume consumption of messages
123 * at a later point in time.
125 void stopConsuming();
128 * Consume messages off the queue until it is empty. No messages may be
129 * added to the queue while it is draining, so that the process is bounded.
130 * To that end, putMessage/tryPutMessage will throw an std::runtime_error,
131 * and tryPutMessageNoThrow will return false.
133 * @returns true if the queue was drained, false otherwise. In practice,
134 * this will only fail if someone else is already draining the queue.
136 bool consumeUntilDrained(size_t* numConsumed = nullptr) noexcept;
139 * Get the NotificationQueue that this consumer is currently consuming
140 * messages from. Returns nullptr if the consumer is not currently
141 * consuming events from any queue.
143 NotificationQueue* getCurrentQueue() const {
148 * Set a limit on how many messages this consumer will read each iteration
149 * around the event loop.
151 * This helps rate-limit how much work the Consumer will do each event loop
152 * iteration, to prevent it from starving other event handlers.
154 * A limit of 0 means no limit will be enforced. If unset, the limit
155 * defaults to kDefaultMaxReadAtOnce (defined to 10 above).
157 void setMaxReadAtOnce(uint32_t maxAtOnce) {
158 maxReadAtOnce_ = maxAtOnce;
160 uint32_t getMaxReadAtOnce() const {
161 return maxReadAtOnce_;
164 EventBase* getEventBase() {
168 void handlerReady(uint16_t events) noexcept override;
172 void destroy() override;
174 virtual ~Consumer() {}
178 * Consume messages off the queue until
179 * - the queue is empty (1), or
180 * - until the consumer is destroyed, or
181 * - until the consumer is uninstalled, or
182 * - an exception is thrown in the course of dequeueing, or
183 * - unless isDrain is true, until the maxReadAtOnce_ limit is hit
185 * (1) Well, maybe. See logic/comments around "wasEmpty" in implementation.
187 void consumeMessages(bool isDrain, size_t* numConsumed = nullptr) noexcept;
// Tracks this consumer's active state in queue_->numActiveConsumers_.
// NOTE(review): the guard lines around the lock/unlock calls are elided;
// locking of queue_->spinlock_ is presumably conditional on shouldLock --
// confirm against the full source.
189 void setActive(bool active, bool shouldLock = false) {
195 queue_->spinlock_.lock();
197 if (!active_ && active) {
198 ++queue_->numActiveConsumers_;
199 } else if (active_ && !active) {
200 --queue_->numActiveConsumers_;
204 queue_->spinlock_.unlock();
// One-time setup shared by startConsuming()/startConsumingInternal():
// attaches this consumer to the queue and event base (defined below).
207 void init(EventBase* eventBase, NotificationQueue* queue);
// Queue currently being consumed from; nullptr when not installed
// (see init() / stopConsuming()).
209 NotificationQueue* queue_;
// While messageAvailable() is running, points at a stack-local flag so
// destroy() can record mid-callback destruction (see destroy() and
// consumeMessages()); nullptr otherwise.
210 bool* destroyedFlagPtr_;
// Per-event-loop-iteration read cap; 0 means unlimited.
211 uint32_t maxReadAtOnce_;
// Lightweight consumer that only registers itself in the queue's consumer
// count (so producers know a reader exists) without hooking into an
// EventBase. Caller polls/reads the returned fd directly.
216 class SimpleConsumer {
218 explicit SimpleConsumer(NotificationQueue& queue) : queue_(queue) {
// Register so putMessageImpl() signals the fd when this consumer is idle.
219 ++queue_.numConsumers_;
// (destructor body) Deregister from the queue's consumer count.
223 --queue_.numConsumers_;
// Returns the fd to wait on: the eventfd when in use, else the pipe's
// read end. NOTE(review): the enclosing accessor's signature is elided
// from this listing -- confirm its name against the full source.
227 return queue_.eventfd_ >= 0 ? queue_.eventfd_ : queue_.pipeFds_[0];
// Non-owning reference: the queue must outlive this consumer.
231 NotificationQueue& queue_;
236 #ifdef FOLLY_HAVE_EVENTFD
242 * Create a new NotificationQueue.
244 * If the maxSize parameter is specified, this sets the maximum queue size
245 * that will be enforced by tryPutMessage(). (This size is advisory, and may
246 * be exceeded if producers explicitly use putMessage() instead of
249 * The fdType parameter determines the type of file descriptor used
250 * internally to signal message availability. The default (eventfd) is
251 * preferable for performance and because it won't fail when the queue gets
252 * too long. It is not available on older and non-linux kernels, however.
253 * In this case the code will fall back to using a pipe, the parameter is
254 * mostly for testing purposes.
256 explicit NotificationQueue(uint32_t maxSize = 0,
// Default signalling mechanism depends on platform support: eventfd on
// Linux (non-Android), otherwise a pipe.
257 #ifdef FOLLY_HAVE_EVENTFD
258 FdType fdType = FdType::EVENTFD)
260 FdType fdType = FdType::PIPE)
264 advisoryMaxQueueSize_(maxSize),
// Remember the creating process's pid so checkPid() can detect misuse
// across fork() (see checkPid() below).
265 pid_(pid_t(getpid())),
268 RequestContext::saveContext();
270 #ifdef FOLLY_HAVE_EVENTFD
271 if (fdType == FdType::EVENTFD) {
272 eventfd_ = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
273 if (eventfd_ == -1) {
// ENOSYS/EINVAL indicate the kernel lacks eventfd support; degrade
// gracefully to pipe mode rather than failing construction.
274 if (errno == ENOSYS || errno == EINVAL) {
275 // eventfd not available
276 LOG(ERROR) << "failed to create eventfd for NotificationQueue: "
277 << errno << ", falling back to pipe mode (is your kernel "
279 fdType = FdType::PIPE;
// Any other eventfd failure is fatal for construction.
282 folly::throwSystemError("Failed to create eventfd for "
283 "NotificationQueue", errno);
288 if (fdType == FdType::PIPE) {
289 if (pipe(pipeFds_)) {
290 folly::throwSystemError("Failed to create pipe for NotificationQueue",
294 // put both ends of the pipe into non-blocking mode
295 if (fcntl(pipeFds_[0], F_SETFL, O_RDONLY | O_NONBLOCK) != 0) {
296 folly::throwSystemError("failed to put NotificationQueue pipe read "
297 "endpoint into non-blocking mode", errno);
299 if (fcntl(pipeFds_[1], F_SETFL, O_WRONLY | O_NONBLOCK) != 0) {
// On failure, close both pipe ends before throwing so construction
// does not leak file descriptors.
300 folly::throwSystemError("failed to put NotificationQueue pipe write "
301 "endpoint into non-blocking mode", errno);
304 ::close(pipeFds_[0]);
305 ::close(pipeFds_[1]);
// Destructor: close whichever fds are open. NOTE(review): the eventfd
// close path is elided from this listing -- confirm against full source.
311 ~NotificationQueue() {
316 if (pipeFds_[0] >= 0) {
317 ::close(pipeFds_[0]);
320 if (pipeFds_[1] >= 0) {
321 ::close(pipeFds_[1]);
327 * Set the advisory maximum queue size.
329 * This maximum queue size affects calls to tryPutMessage(). Message
330 * producers can still use the putMessage() call to unconditionally put a
331 * message on the queue, ignoring the configured maximum queue size. This
332 * can cause the queue size to exceed the configured maximum.
334 void setMaxQueueSize(uint32_t max) {
335 advisoryMaxQueueSize_ = max;
339 * Attempt to put a message on the queue if the queue is not already full.
341 * If the queue is full, a std::overflow_error will be thrown. The
342 * setMaxQueueSize() function controls the maximum queue size.
344 * If the queue is currently draining, an std::runtime_error will be thrown.
346 * This method may contend briefly on a spinlock if many threads are
347 * concurrently accessing the queue, but for all intents and purposes it will
348 * immediately place the message on the queue and return.
350 * tryPutMessage() may throw std::bad_alloc if memory allocation fails, and
351 * may throw any other exception thrown by the MessageT move/copy
354 void tryPutMessage(MessageT&& message) {
355 putMessageImpl(std::move(message), advisoryMaxQueueSize_);
357 void tryPutMessage(const MessageT& message) {
358 putMessageImpl(message, advisoryMaxQueueSize_);
362 * No-throw versions of the above. Instead returns true on success, false on
365 * Only std::overflow_error (the common exception case) and std::runtime_error
366 * (which indicates that the queue is being drained) are prevented from being
367 * thrown. User code must still catch std::bad_alloc errors.
369 bool tryPutMessageNoThrow(MessageT&& message) {
370 return putMessageImpl(std::move(message), advisoryMaxQueueSize_, false);
372 bool tryPutMessageNoThrow(const MessageT& message) {
373 return putMessageImpl(message, advisoryMaxQueueSize_, false);
377 * Unconditionally put a message on the queue.
379 * This method is like tryPutMessage(), but ignores the maximum queue size
380 * and always puts the message on the queue, even if the maximum queue size
383 * putMessage() may throw
384 * - std::bad_alloc if memory allocation fails, and may
385 * - std::runtime_error if the queue is currently draining
386 * - any other exception thrown by the MessageT move/copy constructor.
388 void putMessage(MessageT&& message) {
// maxSize of 0 disables the size check in putMessageImpl().
389 putMessageImpl(std::move(message), 0);
391 void putMessage(const MessageT& message) {
392 putMessageImpl(message, 0);
396 * Put several messages on the queue.
398 template<typename InputIteratorT>
399 void putMessages(InputIteratorT first, InputIteratorT last) {
// Tag-dispatch on the iterator category so putMessagesImpl() can pick
// an appropriate insertion strategy.
400 typedef typename std::iterator_traits<InputIteratorT>::iterator_category
402 putMessagesImpl(first, last, IterCategory());
406 * Try to immediately pull a message off of the queue, without blocking.
408 * If a message is immediately available, the result parameter will be
409 * updated to contain the message contents and true will be returned.
411 * If no message is available, false will be returned and result will be left
414 bool tryConsume(MessageT& result) {
// On every exit path, re-sync the signal fd with the queue's emptiness
// so waiters are not woken for an empty queue (or left unsignalled).
415 SCOPE_EXIT { syncSignalAndQueue(); };
419 folly::SpinLockGuard g(spinlock_);
421 if (UNLIKELY(queue_.empty())) {
425 auto data = std::move(queue_.front());
// Restore the RequestContext that was captured when the message was
// enqueued (see putMessageImpl()).
427 RequestContext::setContext(data.second);
434 size_t size() const {
435 folly::SpinLockGuard g(spinlock_);
436 return queue_.size();
440 * Check that the NotificationQueue is being used from the correct process.
442 * If you create a NotificationQueue in one process, then fork, and try to
443 * send messages to the queue from the child process, you're going to have a
444 * bad time. Unfortunately users have (accidentally) run into this.
446 * Because we use an eventfd/pipe, the child process can actually signal the
447 * parent process that an event is ready. However, it can't put anything on
448 * the parent's queue, so the parent wakes up and finds an empty queue. This
449 * check ensures that we catch the problem in the misbehaving child process
450 * code, and crash before signalling the parent process.
452 void checkPid() const { CHECK_EQ(pid_, pid_t(getpid())); }
455 // Forbidden copy constructor and assignment operator
456 NotificationQueue(NotificationQueue const &) = delete;
457 NotificationQueue& operator=(NotificationQueue const &) = delete;
// Precondition: spinlock_ is held (DCHECK'd below). Throws
// std::overflow_error when the queue is at/over maxSize and throws==true.
// NOTE(review): the return statements are elided from this listing;
// presumably returns whether the message may be added -- confirm.
459 inline bool checkQueueSize(size_t maxSize, bool throws=true) const {
460 DCHECK(0 == spinlock_.trylock());
461 if (maxSize > 0 && queue_.size() >= maxSize) {
463 throw std::overflow_error("unable to add message to NotificationQueue: "
// Throws std::runtime_error while a consumeUntilDrained() is in progress
// (draining_ is true) and throws==true.
471 inline bool checkDraining(bool throws=true) {
472 if (UNLIKELY(draining_ && throws)) {
473 throw std::runtime_error("queue is draining, cannot add message");
479 // TODO 10860938 Remove after figuring out crash
// Diagnostic accounting of bytes written to / read from the signal fd.
480 mutable std::atomic<int> eventBytes_{0};
481 mutable std::atomic<int> maxEventBytes_{0};
// Write one signal token to the eventfd/pipe (retrying on EINTR) so a
// waiting consumer wakes up. Precondition: spinlock_ held.
484 void ensureSignalLocked() const {
485 // semantics: empty fd == empty queue <=> !signal_
490 ssize_t bytes_written = 0;
491 ssize_t bytes_expected = 0;
495 // eventfd(2) dictates that we must write a 64-bit integer
497 bytes_expected = static_cast<ssize_t>(sizeof(signal));
498 bytes_written = ::write(eventfd_, &signal, bytes_expected);
// Pipe fallback path: write the signal token to the pipe's write end.
501 bytes_expected = static_cast<ssize_t>(sizeof(signal));
502 bytes_written = ::write(pipeFds_[1], &signal, bytes_expected);
504 } while (bytes_written == -1 && errno == EINTR);
507 if (bytes_written > 0) {
508 eventBytes_ += bytes_written;
509 maxEventBytes_ = std::max((int)maxEventBytes_, (int)eventBytes_);
513 if (bytes_written == bytes_expected) {
// Partial/failed write: log diagnostics, then throw.
517 LOG(ERROR) << "NotificationQueue Write Error=" << errno
518 << " bytesInPipe=" << eventBytes_
519 << " maxInPipe=" << maxEventBytes_ << " queue=" << size();
521 folly::throwSystemError("failed to signal NotificationQueue after "
// Drain all pending signal bytes from the eventfd/pipe (non-blocking fds,
// so reads terminate with EAGAIN). Precondition: spinlock_ held.
526 void drainSignalsLocked() {
527 ssize_t bytes_read = 0;
530 bytes_read = readNoInt(eventfd_, &message, sizeof(message));
// Only EAGAIN (nothing left to read) is an acceptable read failure.
531 CHECK(bytes_read != -1 || errno == EAGAIN);
533 // There should only be one byte in the pipe. To avoid potential leaks we still drain.
536 while ((result = readNoInt(pipeFds_[0], &message, sizeof(message))) != -1) {
537 bytes_read += result;
539 CHECK(result == -1 && errno == EAGAIN);
540 LOG_IF(ERROR, bytes_read > 1)
541 << "[NotificationQueue] Unexpected state while draining pipe: bytes_read="
542 << bytes_read << " bytes, expected <= 1";
// signal_ must agree with whether any bytes were pending; log otherwise.
544 LOG_IF(ERROR, (signal_ && bytes_read == 0) || (!signal_ && bytes_read > 0))
545 << "[NotificationQueue] Unexpected state while draining signals: signal_="
546 << signal_ << " bytes_read=" << bytes_read;
551 if (bytes_read > 0) {
552 eventBytes_ -= bytes_read;
// Locking wrapper around ensureSignalLocked().
557 void ensureSignal() const {
558 folly::SpinLockGuard g(spinlock_);
559 ensureSignalLocked();
// Make the signal fd state match the queue: drained when the queue is
// empty, signalled otherwise.
562 void syncSignalAndQueue() {
563 folly::SpinLockGuard g(spinlock_);
565 if (queue_.empty()) {
566 drainSignalsLocked();
568 ensureSignalLocked();
// Enqueue (move overload). Under spinlock_: rejects if draining or over
// maxSize, enqueues the message paired with the caller's RequestContext,
// and signals the fd.
572 bool putMessageImpl(MessageT&& message, size_t maxSize, bool throws=true) {
576 folly::SpinLockGuard g(spinlock_);
577 if (checkDraining(throws) || !checkQueueSize(maxSize, throws)) {
580 // We only need to signal an event if not all consumers are
// ...awake; if every consumer is already active, no wakeup is needed.
582 if (numActiveConsumers_ < numConsumers_) {
585 queue_.emplace_back(std::move(message), RequestContext::saveContext());
587 ensureSignalLocked();
// Copy overload of putMessageImpl(); same logic as above.
594 const MessageT& message, size_t maxSize, bool throws=true) {
598 folly::SpinLockGuard g(spinlock_);
599 if (checkDraining(throws) || !checkQueueSize(maxSize, throws)) {
602 if (numActiveConsumers_ < numConsumers_) {
605 queue_.emplace_back(message, RequestContext::saveContext());
607 ensureSignalLocked();
// Bulk enqueue for single-pass (input) iterators: copy each element under
// the lock, then signal once if any consumer may be asleep.
613 template<typename InputIteratorT>
614 void putMessagesImpl(InputIteratorT first, InputIteratorT last,
615 std::input_iterator_tag) {
620 folly::SpinLockGuard g(spinlock_);
622 while (first != last) {
623 queue_.emplace_back(*first, RequestContext::saveContext());
627 if (numActiveConsumers_ < numConsumers_) {
631 ensureSignalLocked();
// Guards all mutable state below except the atomics.
636 mutable folly::SpinLock spinlock_;
// Whether a wakeup token is currently pending in the eventfd/pipe.
637 mutable bool signal_{false};
639 int pipeFds_[2]; // to fallback to on older/non-linux systems
640 uint32_t advisoryMaxQueueSize_;
// Each message is stored with the RequestContext captured at enqueue time.
642 std::deque<std::pair<MessageT, std::shared_ptr<RequestContext>>> queue_;
643 int numConsumers_{0};
644 std::atomic<int> numActiveConsumers_{0};
// True while consumeUntilDrained() is in progress; blocks new puts.
645 bool draining_{false};
648 template<typename MessageT>
// Override of DelayedDestruction::destroy(): safely handles destruction
// requested from inside the messageAvailable() callback.
649 void NotificationQueue<MessageT>::Consumer::destroy() {
650 // If we are in the middle of a call to handlerReady(), destroyedFlagPtr_
651 // will be non-nullptr. Mark the value that it points to, so that
652 // handlerReady() will know the callback is destroyed, and that it cannot
653 // access any member variables anymore.
654 if (destroyedFlagPtr_) {
655 *destroyedFlagPtr_ = true;
// Delegate actual (possibly deferred) destruction to the base class.
658 DelayedDestruction::destroy();
661 template<typename MessageT>
// EventHandler callback: the signal fd became readable, so consume queued
// messages subject to the maxReadAtOnce_ limit (isDrain == false).
662 void NotificationQueue<MessageT>::Consumer::handlerReady(uint16_t /*events*/)
664 consumeMessages(false);
667 template<typename MessageT>
// Core consume loop: pops messages one at a time, invoking
// messageAvailable() for each, until one of the termination conditions
// documented at the declaration is hit. noexcept by contract.
668 void NotificationQueue<MessageT>::Consumer::consumeMessages(
669 bool isDrain, size_t* numConsumed) noexcept {
// Prevent delayed destruction of *this while we are inside the loop.
670 DestructorGuard dg(this);
671 uint32_t numProcessed = 0;
675 queue_->syncSignalAndQueue();
// Regardless of how we exit, mark this consumer inactive (with locking,
// since we are no longer inside the spinlock-protected section).
678 SCOPE_EXIT { setActive(false, /* shouldLock = */ true); };
680 if (numConsumed != nullptr) {
681 *numConsumed = numProcessed;
685 // Now pop the message off of the queue.
687 // We have to manually acquire and release the spinlock here, rather than
688 // using SpinLockHolder since the MessageT has to be constructed while
689 // holding the spinlock and available after we release it. SpinLockHolder
690 // unfortunately doesn't provide a release() method. (We can't construct
691 // MessageT first since we have no guarantee that MessageT has a default
693 queue_->spinlock_.lock();
697 if (UNLIKELY(queue_->queue_.empty())) {
698 // If there is no message, we've reached the end of the queue, return.
700 queue_->spinlock_.unlock();
704 // Pull a message off the queue.
705 auto& data = queue_->queue_.front();
// Move-construct the message while still holding the lock (see the
// comment above on why SpinLockHolder is not usable here).
707 MessageT msg(std::move(data.first));
// Install the RequestContext saved at enqueue time for the callback.
709 RequestContext::setContext(data.second);
710 queue_->queue_.pop_front();
712 // Check to see if the queue is empty now.
713 // We use this as an optimization to see if we should bother trying to
714 // loop again and read another message after invoking this callback.
715 bool wasEmpty = queue_->queue_.empty();
720 // Now unlock the spinlock before we invoke the callback.
721 queue_->spinlock_.unlock();
// Publish a stack flag via destroyedFlagPtr_ so destroy() can tell us
// if the callback destroyed this consumer (see destroy()).
725 bool callbackDestroyed = false;
726 CHECK(destroyedFlagPtr_ == nullptr);
727 destroyedFlagPtr_ = &callbackDestroyed;
728 messageAvailable(std::move(msg));
729 destroyedFlagPtr_ = nullptr;
// Restore the context that was active before the callback ran.
731 RequestContext::setContext(old_ctx);
733 // If the callback was destroyed before it returned, we are done
734 if (callbackDestroyed) {
738 // If the callback is no longer installed, we are done.
739 if (queue_ == nullptr) {
743 // If we have hit maxReadAtOnce_, we are done.
745 if (!isDrain && maxReadAtOnce_ > 0 &&
746 numProcessed >= maxReadAtOnce_) {
750 // If the queue was empty before we invoked the callback, it's probable
751 // that it is still empty now. Just go ahead and return, rather than
752 // looping again and trying to re-read from the eventfd. (If a new
753 // message had in fact arrived while we were invoking the callback, we
754 // will simply be woken up the next time around the event loop and will
755 // process the message then.)
759 } catch (const std::exception& ex) {
760 // This catch block is really just to handle the case where the MessageT
761 // constructor throws. The messageAvailable() callback itself is
762 // declared as noexcept and should never throw.
764 // If the MessageT constructor does throw we try to handle it as best as
765 // we can, but we can't work miracles. We will just ignore the error for
766 // now and return. The next time around the event loop we will end up
767 // trying to read the message again. If MessageT continues to throw we
768 // will never make forward progress and will keep trying each time around
771 // Unlock the spinlock.
// Manual lock was still held when the exception propagated; release it.
772 queue_->spinlock_.unlock();
780 template<typename MessageT>
// Attach this consumer to a queue: must run on the EventBase thread, and
// only when not already consuming from another queue.
781 void NotificationQueue<MessageT>::Consumer::init(
782 EventBase* eventBase,
783 NotificationQueue* queue) {
784 assert(eventBase->isInEventBaseThread());
785 assert(queue_ == nullptr);
786 assert(!isHandlerRegistered());
// Register in the queue's consumer count under its spinlock.
794 folly::SpinLockGuard g(queue_->spinlock_);
795 queue_->numConsumers_++;
// Make sure the signal fd is armed so this handler will fire if messages
// are already pending.
797 queue_->ensureSignal();
// Watch whichever fd the queue uses: eventfd when available, else the
// pipe's read end.
799 if (queue_->eventfd_ >= 0) {
800 initHandler(eventBase, queue_->eventfd_);
802 initHandler(eventBase, queue_->pipeFds_[0]);
806 template<typename MessageT>
// Detach this consumer from its queue. No-op if not currently installed.
807 void NotificationQueue<MessageT>::Consumer::stopConsuming() {
808 if (queue_ == nullptr) {
809 assert(!isHandlerRegistered());
// Deregister from the queue's consumer count under its spinlock.
814 folly::SpinLockGuard g(queue_->spinlock_);
815 queue_->numConsumers_--;
819 assert(isHandlerRegistered());
825 template<typename MessageT>
// Drain the queue completely. Sets draining_ so producers are rejected
// while the drain is in progress; returns false if another drain is
// already running.
826 bool NotificationQueue<MessageT>::Consumer::consumeUntilDrained(
827 size_t* numConsumed) noexcept {
828 DestructorGuard dg(this);
// Claim the drain under the lock; only one drainer at a time.
830 folly::SpinLockGuard g(queue_->spinlock_);
831 if (queue_->draining_) {
834 queue_->draining_ = true;
// isDrain == true: ignore the maxReadAtOnce_ limit and empty the queue.
836 consumeMessages(true, numConsumed);
// Re-acquire the lock to clear the draining flag.
838 folly::SpinLockGuard g(queue_->spinlock_);
839 queue_->draining_ = false;
845 * Creates a NotificationQueue::Consumer wrapping a function object
846 * Modeled after AsyncTimeout::make
852 template <typename MessageT, typename TCallback>
// Adapter that turns a callable into a NotificationQueue Consumer; used by
// NotificationQueue<MessageT>::Consumer::make() below.
853 struct notification_queue_consumer_wrapper
854 : public NotificationQueue<MessageT>::Consumer {
856 template <typename UCallback>
857 explicit notification_queue_consumer_wrapper(UCallback&& callback)
858 : callback_(std::forward<UCallback>(callback)) {}
860 // we are being stricter here and requiring noexcept for callback
// Compile-time check (static_assert opener elided in this listing) that
// the wrapped callable is noexcept, since messageAvailable() must not throw.
861 void messageAvailable(MessageT&& message) override {
863 noexcept(std::declval<TCallback>()(std::forward<MessageT>(message))),
864 "callback must be declared noexcept, e.g.: `[]() noexcept {}`"
867 callback_(std::forward<MessageT>(message));
874 } // namespace detail
876 template <typename MessageT>
877 template <typename TCallback>
// Factory: wrap a callable in a heap-allocated Consumer, returned with the
// DelayedDestruction-aware deleter (see the in-class declaration).
878 std::unique_ptr<typename NotificationQueue<MessageT>::Consumer,
879 DelayedDestruction::Destructor>
880 NotificationQueue<MessageT>::Consumer::make(TCallback&& callback) {
881 return std::unique_ptr<NotificationQueue<MessageT>::Consumer,
882 DelayedDestruction::Destructor>(
// Decay TCallback so both lvalue and rvalue callables instantiate the
// same wrapper type; perfect-forward the callable into it.
883 new detail::notification_queue_consumer_wrapper<
885 typename std::decay<TCallback>::type>(
886 std::forward<TCallback>(callback)));