2 * Copyright 2017 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #ifndef __STDC_FORMAT_MACROS
18 #define __STDC_FORMAT_MACROS
21 #include <folly/io/async/AsyncServerSocket.h>
23 #include <folly/FileUtil.h>
24 #include <folly/Portability.h>
25 #include <folly/SocketAddress.h>
26 #include <folly/String.h>
27 #include <folly/detail/SocketFastOpen.h>
28 #include <folly/io/async/EventBase.h>
29 #include <folly/io/async/NotificationQueue.h>
30 #include <folly/portability/Fcntl.h>
31 #include <folly/portability/Sockets.h>
32 #include <folly/portability/Unistd.h>
36 #include <sys/types.h>
38 namespace fsp = folly::portability::sockets;
// Out-of-line definitions for the in-class static const members.
// Before C++17, a static const data member that is odr-used (e.g. bound to
// a const reference) needs exactly one namespace-scope definition; the
// values themselves are specified in the header.
const uint32_t AsyncServerSocket::kDefaultMaxAcceptAtOnce;
const uint32_t AsyncServerSocket::kDefaultCallbackAcceptAtOnce;
const uint32_t AsyncServerSocket::kDefaultMaxMessagesInQueue;
/**
 * Set or clear the FD_CLOEXEC flag on a file descriptor.
 *
 * @param fd     Descriptor to modify.
 * @param value  Non-zero to set FD_CLOEXEC, zero to clear it.
 * @return 0 on success, -1 on failure (errno is set by fcntl()).
 */
int setCloseOnExec(int fd, int value) {
  // Read the current flags
  int old_flags = fcntl(fd, F_GETFD, 0);

  // If reading the flags failed, return error indication now
  if (old_flags < 0) {
    return -1;
  }

  // Set just the flag we want to set
  int new_flags;
  if (value != 0) {
    new_flags = old_flags | FD_CLOEXEC;
  } else {
    new_flags = old_flags & ~FD_CLOEXEC;
  }

  // Store modified flag word in the descriptor
  return fcntl(fd, F_SETFD, new_flags);
}
65 void AsyncServerSocket::RemoteAcceptor::start(
66 EventBase* eventBase, uint32_t maxAtOnce, uint32_t maxInQueue) {
67 setMaxReadAtOnce(maxAtOnce);
68 queue_.setMaxQueueSize(maxInQueue);
70 if (!eventBase->runInEventBaseThread([=](){
71 callback_->acceptStarted();
72 this->startConsuming(eventBase, &queue_);
74 throw std::invalid_argument("unable to start waiting on accept "
75 "notification queue in the specified "
80 void AsyncServerSocket::RemoteAcceptor::stop(
81 EventBase* eventBase, AcceptCallback* callback) {
82 if (!eventBase->runInEventBaseThread([=](){
83 callback->acceptStopped();
86 throw std::invalid_argument("unable to start waiting on accept "
87 "notification queue in the specified "
// Consumer hook: runs in the acceptor's EventBase thread for each message
// pulled off the notification queue. New-connection messages are forwarded
// to the user's AcceptCallback (after informing the optional connection
// event observer); error messages are converted to std::runtime_error and
// delivered through acceptError().
// NOTE(review): the parameter list and parts of the switch scaffolding
// (switch header, braces, break statements) are elided from this chunk —
// consult the complete file before editing.
void AsyncServerSocket::RemoteAcceptor::messageAvailable(
  case MessageType::MSG_NEW_CONN:
    // Let the observer know this connection left the acceptor queue,
    // then hand the fd and peer address to the accept callback.
    if (connectionEventCallback_) {
      connectionEventCallback_->onConnectionDequeuedByAcceptorCallback(
          msg.fd, msg.address);
    callback_->connectionAccepted(msg.fd, msg.address);
  case MessageType::MSG_ERROR:
    // Wrap the queued error text and report it to the callback.
    std::runtime_error ex(msg.msg);
    callback_->acceptError(ex);
    // Unknown message type: log it and surface a generic accept error.
    LOG(ERROR) << "invalid accept notification message type "
    std::runtime_error ex(
        "received invalid accept notification message type");
    callback_->acceptError(ex);
123 * AsyncServerSocket::BackoffTimeout
125 class AsyncServerSocket::BackoffTimeout : public AsyncTimeout {
127 // Disallow copy, move, and default constructors.
128 BackoffTimeout(BackoffTimeout&&) = delete;
129 explicit BackoffTimeout(AsyncServerSocket* socket)
130 : AsyncTimeout(socket->getEventBase()), socket_(socket) {}
132 void timeoutExpired() noexcept override { socket_->backoffTimeoutExpired(); }
135 AsyncServerSocket* socket_;
139 * AsyncServerSocket methods
/*
 * Construct an AsyncServerSocket attached to the given EventBase (which may
 * be nullptr; attachEventBase() can be called later). No socket is created
 * here — see bind()/useExistingSockets().
 * NOTE(review): several member initializers are elided from this chunk
 * (e.g. accepting_, acceptRate_, callbackIndex_) — see the full file.
 */
AsyncServerSocket::AsyncServerSocket(EventBase* eventBase)
    : eventBase_(eventBase),
      maxAcceptAtOnce_(kDefaultMaxAcceptAtOnce),
      maxNumMsgsInQueue_(kDefaultMaxMessagesInQueue),
      // 0 disables dynamic accept-rate throttling (see handlerReady()).
      acceptRateAdjustSpeed_(0),
      lastAccepTimestamp_(std::chrono::steady_clock::now()),
      numDroppedConnections_(0),
      backoffTimeout_(nullptr),
      // SO_KEEPALIVE is enabled on new sockets by default (see setupSocket()).
      keepAliveEnabled_(true),
      shutdownSocketSet_(nullptr) {
159 void AsyncServerSocket::setShutdownSocketSet(ShutdownSocketSet* newSS) {
160 if (shutdownSocketSet_ == newSS) {
163 if (shutdownSocketSet_) {
164 for (auto& h : sockets_) {
165 shutdownSocketSet_->remove(h.socket_);
168 shutdownSocketSet_ = newSS;
169 if (shutdownSocketSet_) {
170 for (auto& h : sockets_) {
171 shutdownSocketSet_->add(h.socket_);
176 AsyncServerSocket::~AsyncServerSocket() {
177 assert(callbacks_.empty());
/*
 * Stop accepting and tear down all listening sockets.
 *
 * shutdownFlags >= 0 requests shutdown(2) with those flags (the fd is then
 * parked on pendingCloseSockets_ for destroy() to close); a negative value
 * closes the fd immediately. Returns the result of the last shutdown call.
 * NOTE(review): several structural lines are elided from this chunk (the
 * result variable declaration, loop/branch closers, the VLOG continuation,
 * the loop increment of the callback iteration) — consult the full file.
 */
int AsyncServerSocket::stopAccepting(int shutdownFlags) {
  for (auto& handler : sockets_) {
    VLOG(10) << "AsyncServerSocket::stopAccepting " << this <<
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

  // When destroy is called, unregister and close the socket immediately.

  // Close the sockets in reverse order as they were opened to avoid
  // the condition where another process concurrently tries to open
  // the same port, succeed to bind the first socket but fails on the
  // second because it hasn't been closed yet.
  for (; !sockets_.empty(); sockets_.pop_back()) {
    auto& handler = sockets_.back();
    handler.unregisterHandler();
    if (shutdownSocketSet_) {
      shutdownSocketSet_->close(handler.socket_);
    } else if (shutdownFlags >= 0) {
      result = shutdownNoInt(handler.socket_, shutdownFlags);
      // Defer the close; destroy() drains pendingCloseSockets_.
      pendingCloseSockets_.push_back(handler.socket_);
      closeNoInt(handler.socket_);

  // Destroy the backoff timout. This will cancel it if it is running.
  delete backoffTimeout_;
  backoffTimeout_ = nullptr;

  // Close all of the callback queues to notify them that they are being
  // destroyed. No one should access the AsyncServerSocket any more once
  // destroy() is called. However, clear out callbacks_ before invoking the
  // accept callbacks just in case. This will potentially help us detect the
  // bug if one of the callbacks calls addAcceptCallback() or
  // removeAcceptCallback().
  std::vector<CallbackInfo> callbacksCopy;
  callbacks_.swap(callbacksCopy);
  for (std::vector<CallbackInfo>::iterator it = callbacksCopy.begin();
       it != callbacksCopy.end();
    // consumer may not be set if we are running in primary event base
    DCHECK(it->eventBase);
    it->consumer->stop(it->eventBase, it->callback);
    DCHECK(it->callback);
    it->callback->acceptStopped();
236 void AsyncServerSocket::destroy() {
238 for (auto s : pendingCloseSockets_) {
241 // Then call DelayedDestruction::destroy() to take care of
242 // whether or not we need immediate or delayed destruction
243 DelayedDestruction::destroy();
246 void AsyncServerSocket::attachEventBase(EventBase *eventBase) {
247 assert(eventBase_ == nullptr);
248 assert(eventBase->isInEventBaseThread());
250 eventBase_ = eventBase;
251 for (auto& handler : sockets_) {
252 handler.attachEventBase(eventBase);
256 void AsyncServerSocket::detachEventBase() {
257 assert(eventBase_ != nullptr);
258 assert(eventBase_->isInEventBaseThread());
261 eventBase_ = nullptr;
262 for (auto& handler : sockets_) {
263 handler.detachEventBase();
/*
 * Adopt a set of pre-created (typically already-bound) socket fds instead of
 * creating new ones. May only be called before any socket exists.
 * NOTE(review): the per-fd loop header (`for (auto fd : fds)`) and the
 * platform guard around SO_NO_TRANSPARENT_TLS are elided from this chunk —
 * consult the full file before editing.
 */
void AsyncServerSocket::useExistingSockets(const std::vector<int>& fds) {
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

  if (sockets_.size() > 0) {
    throw std::invalid_argument(
        "cannot call useExistingSocket() on a "
        "AsyncServerSocket that already has a socket");

  // Set addressFamily_ from this socket.
  // Note that the socket may not have been bound yet, but
  // setFromLocalAddress() will still work and get the correct address family.
  // We will update addressFamily_ again anyway if bind() is called later.
  SocketAddress address;
  address.setFromLocalAddress(fd);

  if (noTransparentTls_) {
    // Ignore return value, errors are ok
    setsockopt(fd, SOL_SOCKET, SO_NO_TRANSPARENT_TLS, nullptr, 0);

  // Apply the usual server-socket options, then register the fd.
  setupSocket(fd, address.getFamily());
  sockets_.emplace_back(eventBase_, fd, this, address.getFamily());
  sockets_.back().changeHandlerFD(fd);
297 void AsyncServerSocket::useExistingSocket(int fd) {
298 useExistingSockets({fd});
/*
 * Bind a single fd to the given address and, if the fd was newly created,
 * register it in sockets_.
 * NOTE(review): the `int fd` parameter line, the error-path cleanup before
 * the throw, the address text appended to the error message, and the
 * platform guard around SO_NO_TRANSPARENT_TLS are elided from this chunk —
 * consult the full file before editing.
 */
void AsyncServerSocket::bindSocket(
    const SocketAddress& address,
    bool isExistingSocket) {
  sockaddr_storage addrStorage;
  address.getAddress(&addrStorage);
  sockaddr* saddr = reinterpret_cast<sockaddr*>(&addrStorage);

  if (fsp::bind(fd, saddr, address.getActualSize()) != 0) {
    // Only clean up fds we created ourselves.
    if (!isExistingSocket) {
    folly::throwSystemError(errno,
        "failed to bind to async server socket: " +

  if (noTransparentTls_) {
    // Ignore return value, errors are ok
    setsockopt(fd, SOL_SOCKET, SO_NO_TRANSPARENT_TLS, nullptr, 0);

  // If we just created this socket, update the EventHandler and set socket_
  if (!isExistingSocket) {
    sockets_.emplace_back(eventBase_, fd, this, address.getFamily());
331 void AsyncServerSocket::bind(const SocketAddress& address) {
332 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
334 // useExistingSocket() may have been called to initialize socket_ already.
335 // However, in the normal case we need to create a new socket now.
336 // Don't set socket_ yet, so that socket_ will remain uninitialized if an
339 if (sockets_.size() == 0) {
340 fd = createSocket(address.getFamily());
341 } else if (sockets_.size() == 1) {
342 if (address.getFamily() != sockets_[0].addressFamily_) {
343 throw std::invalid_argument(
344 "Attempted to bind address to socket with "
345 "different address family");
347 fd = sockets_[0].socket_;
349 throw std::invalid_argument(
350 "Attempted to bind to multiple fds");
353 bindSocket(fd, address, !sockets_.empty());
356 void AsyncServerSocket::bind(
357 const std::vector<IPAddress>& ipAddresses,
359 if (ipAddresses.empty()) {
360 throw std::invalid_argument("No ip addresses were provided");
362 if (!sockets_.empty()) {
363 throw std::invalid_argument("Cannot call bind on a AsyncServerSocket "
364 "that already has a socket.");
367 for (const IPAddress& ipAddress : ipAddresses) {
368 SocketAddress address(ipAddress.toFullyQualified(), port);
369 int fd = createSocket(address.getFamily());
371 bindSocket(fd, address, false);
373 if (sockets_.size() == 0) {
374 throw std::runtime_error(
375 "did not bind any async server socket for port and addresses");
/*
 * Wildcard-bind to the given port on all local addresses, preferring IPv6
 * (with IPV6_V6ONLY) and then IPv4, retrying up to kNumTries times when
 * port==0 and the kernel-chosen IPv6 port cannot be re-bound on IPv4.
 * NOTE(review): significant scaffolding is elided from this chunk — the
 * try/catch pairing, lambda terminator, break/continue statements, the
 * `one`/`v6only` option variables, and several closers. Consult the full
 * file before editing.
 */
void AsyncServerSocket::bind(uint16_t port) {
  struct addrinfo hints, *res0;
  char sport[sizeof("65536")];

  memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_UNSPEC;
  hints.ai_socktype = SOCK_STREAM;
  hints.ai_flags = AI_PASSIVE | AI_NUMERICSERV;
  snprintf(sport, sizeof(sport), "%u", port);

  // On Windows the value we need to pass to bind to all available
  // addresses is an empty string. Everywhere else, it's nullptr.
  constexpr const char* kWildcardNode = kIsWindows ? "" : nullptr;
  if (getaddrinfo(kWildcardNode, sport, &hints, &res0)) {
    throw std::invalid_argument(
        "Attempted to bind address to socket with "

  // Ensure the addrinfo list is released on every exit path.
  SCOPE_EXIT { freeaddrinfo(res0); };

  auto setupAddress = [&] (struct addrinfo* res) {
    int s = fsp::socket(res->ai_family, res->ai_socktype, res->ai_protocol);
    // IPv6/IPv4 may not be supported by the kernel
    if (s < 0 && errno == EAFNOSUPPORT) {

    setupSocket(s, res->ai_family);

    if (res->ai_family == AF_INET6) {
      CHECK(0 == setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY,
                            &v6only, sizeof(v6only)));

    // Bind to the socket
    if (fsp::bind(s, res->ai_addr, socklen_t(res->ai_addrlen)) != 0) {
      folly::throwSystemError(
          "failed to bind to async server socket for port ",
          SocketAddress::getPortFrom(res->ai_addr),
          SocketAddress::getFamilyNameFrom(res->ai_addr, "<unknown>"));

    if (noTransparentTls_) {
      // Ignore return value, errors are ok
      setsockopt(s, SOL_SOCKET, SO_NO_TRANSPARENT_TLS, nullptr, 0);

    SocketAddress address;
    address.setFromLocalAddress(s);

    sockets_.emplace_back(eventBase_, s, this, address.getFamily());

  const int kNumTries = 25;
  for (int tries = 1; true; tries++) {
    // Prefer AF_INET6 addresses. RFC 3484 mandates that getaddrinfo
    // should return IPv6 first and then IPv4 addresses, but glibc's
    // getaddrinfo(nullptr) with AI_PASSIVE returns:
    // - 0.0.0.0 (IPv4-only)
    // - :: (IPv6+IPv4) in this order
    // See: https://sourceware.org/bugzilla/show_bug.cgi?id=9981
    for (struct addrinfo* res = res0; res; res = res->ai_next) {
      if (res->ai_family == AF_INET6) {

    // If port == 0, then we should try to bind to the same port on ipv4 and
    // ipv6. So if we did bind to ipv6, figure out that port and use it.
    if (sockets_.size() == 1 && port == 0) {
      SocketAddress address;
      address.setFromLocalAddress(sockets_.back().socket_);
      snprintf(sport, sizeof(sport), "%u", address.getPort());
      CHECK_EQ(0, getaddrinfo(nullptr, sport, &hints, &res0));

    for (struct addrinfo* res = res0; res; res = res->ai_next) {
      if (res->ai_family != AF_INET6) {
    } catch (const std::system_error&) {
      // If we can't bind to the same port on ipv4 as ipv6 when using
      // port=0 then we will retry again before giving up after
      // kNumTries attempts. We do this by closing the sockets that
      // were opened, then restarting from scratch.
      if (port == 0 && !sockets_.empty() && tries != kNumTries) {
        for (const auto& socket : sockets_) {
          if (socket.socket_ <= 0) {
          } else if (shutdownSocketSet_) {
            shutdownSocketSet_->close(socket.socket_);
            closeNoInt(socket.socket_);
        snprintf(sport, sizeof(sport), "%u", port);
        CHECK_EQ(0, getaddrinfo(nullptr, sport, &hints, &res0));

  if (sockets_.size() == 0) {
    throw std::runtime_error(
        "did not bind any async server socket for port");
508 void AsyncServerSocket::listen(int backlog) {
509 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
512 for (auto& handler : sockets_) {
513 if (fsp::listen(handler.socket_, backlog) == -1) {
514 folly::throwSystemError(errno,
515 "failed to listen on async server socket");
520 void AsyncServerSocket::getAddress(SocketAddress* addressReturn) const {
521 CHECK(sockets_.size() >= 1);
522 VLOG_IF(2, sockets_.size() > 1)
523 << "Warning: getAddress() called and multiple addresses available ("
524 << sockets_.size() << "). Returning only the first one.";
526 addressReturn->setFromLocalAddress(sockets_[0].socket_);
529 std::vector<SocketAddress> AsyncServerSocket::getAddresses()
531 CHECK(sockets_.size() >= 1);
532 auto tsaVec = std::vector<SocketAddress>(sockets_.size());
533 auto tsaIter = tsaVec.begin();
534 for (const auto& socket : sockets_) {
535 (tsaIter++)->setFromLocalAddress(socket.socket_);
/*
 * Register an AcceptCallback to receive new connections.
 *
 * If eventBase is non-null, a RemoteAcceptor with a notification queue is
 * created so the callback runs in that EventBase's thread; a nullptr
 * eventBase means the callback runs directly in the socket's own EventBase.
 * NOTE(review): the nullptr-eventBase branch, the try/catch around
 * acceptor->start(), and several closers are elided from this chunk —
 * consult the full file before editing.
 */
void AsyncServerSocket::addAcceptCallback(AcceptCallback *callback,
                                          EventBase *eventBase,
                                          uint32_t maxAtOnce) {
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

  // If this is the first accept callback and we are supposed to be accepting,
  // start accepting once the callback is installed.
  bool runStartAccepting = accepting_ && callbacks_.empty();

  callbacks_.emplace_back(callback, eventBase);

  // If this is the first accept callback and we are supposed to be accepting,
  if (runStartAccepting) {

  // Run in AsyncServerSocket's eventbase; notify that we are
  // starting to accept connections
  callback->acceptStarted();

  // Start the remote acceptor.
  //
  // It would be nice if we could avoid starting the remote acceptor if
  // eventBase == eventBase_. However, that would cause issues if
  // detachEventBase() and attachEventBase() were ever used to change the
  // primary EventBase for the server socket. Therefore we require the caller
  // to specify a nullptr EventBase if they want to ensure that the callback is
  // always invoked in the primary EventBase, and to be able to invoke that
  // callback more efficiently without having to use a notification queue.
  RemoteAcceptor* acceptor = nullptr;
  acceptor = new RemoteAcceptor(callback, connectionEventCallback_);
  acceptor->start(eventBase, maxAtOnce, maxNumMsgsInQueue_);
  // On start() failure, roll back the registration we just made.
  callbacks_.pop_back();
  callbacks_.back().consumer = acceptor;
/*
 * Unregister a previously-added AcceptCallback. A nullptr eventBase matches
 * any registration for the callback. Throws if the callback is not found.
 * NOTE(review): the search-loop increment, the index variable `n`, the
 * callbackIndex_ decrement, and several closers are elided from this
 * chunk — consult the full file before editing.
 */
void AsyncServerSocket::removeAcceptCallback(AcceptCallback *callback,
                                             EventBase *eventBase) {
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

  // Find the matching AcceptCallback.
  // We just do a simple linear search; we don't expect removeAcceptCallback()
  // to be called frequently, and we expect there to only be a small number of
  // callbacks anyway.
  std::vector<CallbackInfo>::iterator it = callbacks_.begin();
  if (it == callbacks_.end()) {
    throw std::runtime_error("AsyncServerSocket::removeAcceptCallback(): "
                             "accept callback not found");
  if (it->callback == callback &&
      (it->eventBase == eventBase || eventBase == nullptr)) {

  // Remove this callback from callbacks_.
  //
  // Do this before invoking the acceptStopped() callback, in case
  // acceptStopped() invokes one of our methods that examines callbacks_.
  //
  // Save a copy of the CallbackInfo first.
  CallbackInfo info(*it);
  callbacks_.erase(it);
  if (n < callbackIndex_) {
    // We removed an element before callbackIndex_. Move callbackIndex_ back
    // one step, since things after n have been shifted back by 1.
    // We removed something at or after callbackIndex_.
    // If we removed the last element and callbackIndex_ was pointing at it,
    // we need to reset callbackIndex_ to 0.
    if (callbackIndex_ >= callbacks_.size()) {

  // consumer could be nullptr is we run callbacks in primary event
  // base
  DCHECK(info.eventBase);
  info.consumer->stop(info.eventBase, info.callback);
  // callback invoked in the primary event base, just call directly
  DCHECK(info.callback);
  callback->acceptStopped();

  // If we are supposed to be accepting but the last accept callback
  // was removed, unregister for events until a callback is added.
  if (accepting_ && callbacks_.empty()) {
    for (auto& handler : sockets_) {
      handler.unregisterHandler();
651 void AsyncServerSocket::startAccepting() {
652 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
655 if (callbacks_.empty()) {
656 // We can't actually begin accepting if no callbacks are defined.
657 // Wait until a callback is added to start accepting.
661 for (auto& handler : sockets_) {
662 if (!handler.registerHandler(
663 EventHandler::READ | EventHandler::PERSIST)) {
664 throw std::runtime_error("failed to register for accept events");
669 void AsyncServerSocket::pauseAccepting() {
670 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
672 for (auto& handler : sockets_) {
673 handler. unregisterHandler();
676 // If we were in the accept backoff state, disable the backoff timeout
677 if (backoffTimeout_) {
678 backoffTimeout_->cancelTimeout();
682 int AsyncServerSocket::createSocket(int family) {
683 int fd = fsp::socket(family, SOCK_STREAM, 0);
685 folly::throwSystemError(errno, "error creating async server socket");
689 setupSocket(fd, family);
/*
 * Apply the standard options to a server socket fd: non-blocking mode,
 * SO_REUSEADDR, optional SO_REUSEPORT, SO_KEEPALIVE, FD_CLOEXEC,
 * TCP_NODELAY (non-unix sockets), optional TCP Fast Open, and registration
 * with the shutdown socket set.
 * NOTE(review): declarations of the `one`/`zero` option values, the
 * platform #ifdef guards, and the SO_REUSEPORT failure/cleanup path are
 * elided or garbled in this chunk — consult the full file before editing.
 */
void AsyncServerSocket::setupSocket(int fd, int family) {
  // Put the socket in non-blocking mode
  if (fcntl(fd, F_SETFL, O_NONBLOCK) != 0) {
    folly::throwSystemError(errno,
                            "failed to put socket in non-blocking mode");

  // Set reuseaddr to avoid 2MSL delay on server restart
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) != 0) {
    // This isn't a fatal error; just log an error message and continue
    LOG(ERROR) << "failed to set SO_REUSEADDR on async server socket " << errno;

  // Set reuseport to support multiple accept threads
  if (reusePortEnabled_ &&
      setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(int)) != 0) {
    LOG(ERROR) << "failed to set SO_REUSEPORT on async server socket "
  folly::throwSystemError(errno, "failed to bind to the async server socket");
  SocketAddress address;
  address.setFromLocalAddress(fd);
  folly::throwSystemError(errno,
                          "failed to bind to async server socket: " +

  // Set keepalive as desired
  if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE,
                 (keepAliveEnabled_) ? &one : &zero, sizeof(int)) != 0) {
    LOG(ERROR) << "failed to set SO_KEEPALIVE on async server socket: " <<

  // Setup FD_CLOEXEC flag
  (-1 == folly::setCloseOnExec(fd, closeOnExec_))) {
    LOG(ERROR) << "failed to set FD_CLOEXEC on async server socket: " <<

  // Set TCP nodelay if available, MAC OS X Hack
  // See http://lists.danga.com/pipermail/memcached/2005-March/001240.html
  if (family != AF_UNIX) {
    if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) != 0) {
      // This isn't a fatal error; just log an error message and continue
      LOG(ERROR) << "failed to set TCP_NODELAY on async server socket: " <<

  // Best-effort TCP Fast Open; failure is logged, not fatal.
  if (tfo_ && detail::tfo_enable(fd, tfoMaxQueueSize_) != 0) {
    // This isn't a fatal error; just log an error message and continue
    LOG(WARNING) << "failed to set TCP_FASTOPEN on async server socket: "
                 << folly::errnoStr(errno);

  if (shutdownSocketSet_) {
    shutdownSocketSet_->add(fd);
/*
 * libevent READ callback for one listening fd: accept up to
 * maxAcceptAtOnce_ connections, apply optional accept-rate throttling, and
 * dispatch each new fd (or error) to the accept callbacks.
 * NOTE(review): the `int fd` parameter line, the #if/#else pairing around
 * accept4 vs accept, the continue/break statements, and various closers
 * are elided from this chunk — consult the full file before editing.
 */
void AsyncServerSocket::handlerReady(uint16_t /* events */,
                                     sa_family_t addressFamily) noexcept {
  assert(!callbacks_.empty());
  // Keep this object alive across user callbacks.
  DestructorGuard dg(this);

  // Only accept up to maxAcceptAtOnce_ connections at a time,
  // to avoid starving other I/O handlers using this EventBase.
  for (uint32_t n = 0; n < maxAcceptAtOnce_; ++n) {
    SocketAddress address;

    sockaddr_storage addrStorage;
    socklen_t addrLen = sizeof(addrStorage);
    sockaddr* saddr = reinterpret_cast<sockaddr*>(&addrStorage);

    // In some cases, accept() doesn't seem to update these correctly.
    saddr->sa_family = addressFamily;
    if (addressFamily == AF_UNIX) {
      addrLen = sizeof(struct sockaddr_un);

    // Accept a new client socket
    int clientSocket = accept4(fd, saddr, &addrLen, SOCK_NONBLOCK);
    int clientSocket = accept(fd, saddr, &addrLen);

    address.setFromSockaddr(saddr, addrLen);

    if (clientSocket >= 0 && connectionEventCallback_) {
      connectionEventCallback_->onConnectionAccepted(clientSocket, address);

    // Accept-rate throttling: grow acceptRate_ back toward 1 over time,
    // and randomly drop connections while it is below 1.
    std::chrono::time_point<std::chrono::steady_clock> nowMs =
        std::chrono::steady_clock::now();
    auto timeSinceLastAccept = std::max<int64_t>(
        nowMs.time_since_epoch().count() -
        lastAccepTimestamp_.time_since_epoch().count());
    lastAccepTimestamp_ = nowMs;
    if (acceptRate_ < 1) {
      acceptRate_ *= 1 + acceptRateAdjustSpeed_ * timeSinceLastAccept;
      if (acceptRate_ >= 1) {
    } else if (rand() > acceptRate_ * RAND_MAX) {
      ++numDroppedConnections_;
      if (clientSocket >= 0) {
        closeNoInt(clientSocket);
        if (connectionEventCallback_) {
          connectionEventCallback_->onConnectionDropped(clientSocket,

    if (clientSocket < 0) {
      if (errno == EAGAIN) {
        // No more sockets to accept right now.
        // Check for this code first, since it's the most common.
      } else if (errno == EMFILE || errno == ENFILE) {
        // We're out of file descriptors. Perhaps we're accepting connections
        // too quickly. Pause accepting briefly to back off and give the server
        // a chance to recover.
        LOG(ERROR) << "accept failed: out of file descriptors; entering accept "
        // Dispatch the error message
        dispatchError("accept() failed", errno);
        dispatchError("accept() failed", errno);
      if (connectionEventCallback_) {
        connectionEventCallback_->onConnectionAcceptError(errno);

#ifndef SOCK_NONBLOCK
    // Explicitly set the new connection to non-blocking mode
    if (fcntl(clientSocket, F_SETFL, O_NONBLOCK) != 0) {
      closeNoInt(clientSocket);
      dispatchError("failed to set accepted socket to non-blocking mode",
      if (connectionEventCallback_) {
        connectionEventCallback_->onConnectionDropped(clientSocket, address);

    // Inform the callback about the new connection
    dispatchSocket(clientSocket, std::move(address));

    // If we aren't accepting any more, break out of the loop
    if (!accepting_ || callbacks_.empty()) {
/*
 * Hand an accepted socket to the next accept callback (round-robin via
 * nextCallback()). Callbacks registered with a nullptr EventBase are
 * invoked directly; others receive the fd through their RemoteAcceptor's
 * notification queue. If every queue is full the fd is dropped/closed.
 * NOTE(review): the surrounding while-loop scaffolding, the QueueMessage
 * declaration, returns/closes, and several braces are elided from this
 * chunk — consult the full file before editing.
 */
void AsyncServerSocket::dispatchSocket(int socket,
                                       SocketAddress&& address) {
  uint32_t startingIndex = callbackIndex_;

  // Short circuit if the callback is in the primary EventBase thread
  CallbackInfo *info = nextCallback();
  if (info->eventBase == nullptr) {
    info->callback->connectionAccepted(socket, address);

  // Keep a copy: `address` is moved into the queue message below.
  const SocketAddress addr(address);
  // Create a message to send over the notification queue
  msg.type = MessageType::MSG_NEW_CONN;
  msg.address = std::move(address);

  // Loop until we find a free queue to write to
  if (info->consumer->getQueue()->tryPutMessageNoThrow(std::move(msg))) {
    if (connectionEventCallback_) {
      connectionEventCallback_->onConnectionEnqueuedForAcceptorCallback(

  // We couldn't add to queue. Fall through to below

  ++numDroppedConnections_;
  if (acceptRateAdjustSpeed_ > 0) {
    // aggressively decrease accept rate when in trouble
    static const double kAcceptRateDecreaseSpeed = 0.1;
    acceptRate_ *= 1 - kAcceptRateDecreaseSpeed;

  if (callbackIndex_ == startingIndex) {
    // The notification queue was full
    // We can't really do anything at this point other than close the socket.
    //
    // This should only happen if a user's service is behaving extremely
    // badly and none of the EventBase threads are looping fast enough to
    // process the incoming connections. If the service is overloaded, it
    // should use pauseAccepting() to temporarily back off accepting new
    // connections, before they reach the point where their threads can't
    // even accept new messages.
    LOG(ERROR) << "failed to dispatch newly accepted socket:"
               << " all accept callback queues are full";
    if (connectionEventCallback_) {
      connectionEventCallback_->onConnectionDropped(socket, addr);

  info = nextCallback();
/*
 * Report an accept error to the next accept callback, either directly
 * (nullptr-EventBase registration) or through the RemoteAcceptor's
 * notification queue. If every queue is full the error is logged and
 * dropped.
 * NOTE(review): the while-loop scaffolding, the QueueMessage declaration,
 * returns, and several braces are elided from this chunk — consult the
 * full file before editing.
 */
void AsyncServerSocket::dispatchError(const char *msgstr, int errnoValue) {
  uint32_t startingIndex = callbackIndex_;
  CallbackInfo *info = nextCallback();

  // Create a message to send over the notification queue
  msg.type = MessageType::MSG_ERROR;
  msg.err = errnoValue;
  msg.msg = std::move(msgstr);

  // Short circuit if the callback is in the primary EventBase thread
  if (info->eventBase == nullptr) {
    std::runtime_error ex(
        std::string(msgstr) + folly::to<std::string>(errnoValue));
    info->callback->acceptError(ex);

  if (info->consumer->getQueue()->tryPutMessageNoThrow(std::move(msg))) {
  // Fall through and try another callback

  if (callbackIndex_ == startingIndex) {
    // The notification queues for all of the callbacks were full.
    // We can't really do anything at this point.
    LOG(ERROR) << "failed to dispatch accept error: all accept callback "
                  "queues are full: error msg: " <<
        msg.msg.c_str() << errnoValue;
  info = nextCallback();
/*
 * Enter the accept-backoff state: lazily allocate the BackoffTimeout,
 * schedule it for 1 second, and unregister all fd handlers so no further
 * accepts happen until the timer fires. accepting_ stays true because it
 * tracks the user's desired state.
 * NOTE(review): the try around the allocation and the early returns on the
 * error paths are elided from this chunk — consult the full file before
 * editing.
 */
void AsyncServerSocket::enterBackoff() {
  // If this is the first time we have entered the backoff state,
  // allocate backoffTimeout_.
  if (backoffTimeout_ == nullptr) {
    backoffTimeout_ = new BackoffTimeout(this);
  } catch (const std::bad_alloc&) {
    // Man, we couldn't even allocate the timer to re-enable accepts.
    // We must be in pretty bad shape. Don't pause accepting for now,
    // since we won't be able to re-enable ourselves later.
    LOG(ERROR) << "failed to allocate AsyncServerSocket backoff"
               << " timer; unable to temporarly pause accepting";
    if (connectionEventCallback_) {
      connectionEventCallback_->onBackoffError();

  // For now, we simply pause accepting for 1 second.
  //
  // We could add some smarter backoff calculation here in the future. (e.g.,
  // start sleeping for longer if we keep hitting the backoff frequently.)
  // Typically the user needs to figure out why the server is overloaded and
  // fix it in some other way, though. The backoff timer is just a simple
  // mechanism to try and give the connection processing code a little bit of
  // breathing room to catch up, and to avoid just spinning and failing to
  // accept over and over again.
  const uint32_t timeoutMS = 1000;
  if (!backoffTimeout_->scheduleTimeout(timeoutMS)) {
    LOG(ERROR) << "failed to schedule AsyncServerSocket backoff timer;"
               << "unable to temporarly pause accepting";
    if (connectionEventCallback_) {
      connectionEventCallback_->onBackoffError();

  // The backoff timer is scheduled to re-enable accepts.
  // Go ahead and disable accepts for now. We leave accepting_ set to true,
  // since that tracks the desired state requested by the user.
  for (auto& handler : sockets_) {
    handler.unregisterHandler();
  if (connectionEventCallback_) {
    connectionEventCallback_->onBackoffStarted();
/*
 * Called when the backoff timer fires: re-register the fd handlers for READ
 * events so accepting resumes, and notify the connection event observer.
 * NOTE(review): this definition continues past the end of the visible
 * chunk (its final lines, including a fatal-log path on registration
 * failure, are not shown here).
 */
void AsyncServerSocket::backoffTimeoutExpired() {
  // accepting_ should still be true.
  // If pauseAccepting() was called while in the backoff state it will cancel
  // the backoff timeout.

  // We can't be detached from the EventBase without being paused
  assert(eventBase_ != nullptr && eventBase_->isInEventBaseThread());

  // If all of the callbacks were removed, we shouldn't re-enable accepts
  if (callbacks_.empty()) {
    if (connectionEventCallback_) {
      connectionEventCallback_->onBackoffEnded();

  // Register the handler.
  for (auto& handler : sockets_) {
    if (!handler.registerHandler(
            EventHandler::READ | EventHandler::PERSIST)) {
      // We're hosed. We could just re-schedule backoffTimeout_ to
      // re-try again after a little bit. However, we don't want to
      // loop retrying forever if we can't re-enable accepts. Just
      // abort the entire program in this state; things are really bad
      // and restarting the entire server is probably the best remedy.
      << "failed to re-enable AsyncServerSocket accepts after backoff; "
  if (connectionEventCallback_) {
    connectionEventCallback_->onBackoffEnded();