/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include <folly/io/async/AsyncServerSocket.h>

#include <folly/FileUtil.h>
#include <folly/SocketAddress.h>
#include <folly/String.h>
#include <folly/detail/SocketFastOpen.h>
#include <folly/io/async/EventBase.h>
#include <folly/io/async/NotificationQueue.h>
#include <folly/portability/Fcntl.h>
#include <folly/portability/Sockets.h>
#include <folly/portability/Unistd.h>

#include <errno.h>
#include <string.h>
#include <sys/types.h>
37 namespace fsp = folly::portability::sockets;
// Out-of-line definitions for the static const members declared in
// AsyncServerSocket.h; required so the constants can be ODR-used
// (e.g. passed by reference) prior to C++17 inline variables.
const uint32_t AsyncServerSocket::kDefaultMaxAcceptAtOnce;
const uint32_t AsyncServerSocket::kDefaultCallbackAcceptAtOnce;
const uint32_t AsyncServerSocket::kDefaultMaxMessagesInQueue;
// Set or clear the FD_CLOEXEC flag on a file descriptor.
//
// @param fd     The file descriptor to modify.
// @param value  Non-zero to set FD_CLOEXEC, zero to clear it.
// @return 0 on success, or -1 on error (errno is set by fcntl()).
int setCloseOnExec(int fd, int value) {
  // Read the current flags
  int old_flags = fcntl(fd, F_GETFD, 0);

  // If reading the flags failed, return error indication now
  if (old_flags < 0) {
    return -1;
  }

  // Set just the flag we want to set
  int new_flags;
  if (value != 0) {
    new_flags = old_flags | FD_CLOEXEC;
  } else {
    new_flags = old_flags & ~FD_CLOEXEC;
  }

  // Store modified flag word in the descriptor
  return fcntl(fd, F_SETFD, new_flags);
}
64 void AsyncServerSocket::RemoteAcceptor::start(
65 EventBase* eventBase, uint32_t maxAtOnce, uint32_t maxInQueue) {
66 setMaxReadAtOnce(maxAtOnce);
67 queue_.setMaxQueueSize(maxInQueue);
69 if (!eventBase->runInEventBaseThread([=](){
70 callback_->acceptStarted();
71 this->startConsuming(eventBase, &queue_);
73 throw std::invalid_argument("unable to start waiting on accept "
74 "notification queue in the specified "
79 void AsyncServerSocket::RemoteAcceptor::stop(
80 EventBase* eventBase, AcceptCallback* callback) {
81 if (!eventBase->runInEventBaseThread([=](){
82 callback->acceptStopped();
85 throw std::invalid_argument("unable to start waiting on accept "
86 "notification queue in the specified "
91 void AsyncServerSocket::RemoteAcceptor::messageAvailable(
95 case MessageType::MSG_NEW_CONN:
97 if (connectionEventCallback_) {
98 connectionEventCallback_->onConnectionDequeuedByAcceptorCallback(
101 callback_->connectionAccepted(msg.fd, msg.address);
104 case MessageType::MSG_ERROR:
106 std::runtime_error ex(msg.msg);
107 callback_->acceptError(ex);
112 LOG(ERROR) << "invalid accept notification message type "
114 std::runtime_error ex(
115 "received invalid accept notification message type");
116 callback_->acceptError(ex);
122 * AsyncServerSocket::BackoffTimeout
124 class AsyncServerSocket::BackoffTimeout : public AsyncTimeout {
126 // Disallow copy, move, and default constructors.
127 BackoffTimeout(BackoffTimeout&&) = delete;
128 BackoffTimeout(AsyncServerSocket* socket)
129 : AsyncTimeout(socket->getEventBase()), socket_(socket) {}
131 void timeoutExpired() noexcept override { socket_->backoffTimeoutExpired(); }
134 AsyncServerSocket* socket_;
138 * AsyncServerSocket methods
141 AsyncServerSocket::AsyncServerSocket(EventBase* eventBase)
142 : eventBase_(eventBase),
144 maxAcceptAtOnce_(kDefaultMaxAcceptAtOnce),
145 maxNumMsgsInQueue_(kDefaultMaxMessagesInQueue),
146 acceptRateAdjustSpeed_(0),
148 lastAccepTimestamp_(std::chrono::steady_clock::now()),
149 numDroppedConnections_(0),
151 backoffTimeout_(nullptr),
153 keepAliveEnabled_(true),
155 shutdownSocketSet_(nullptr) {
158 void AsyncServerSocket::setShutdownSocketSet(ShutdownSocketSet* newSS) {
159 if (shutdownSocketSet_ == newSS) {
162 if (shutdownSocketSet_) {
163 for (auto& h : sockets_) {
164 shutdownSocketSet_->remove(h.socket_);
167 shutdownSocketSet_ = newSS;
168 if (shutdownSocketSet_) {
169 for (auto& h : sockets_) {
170 shutdownSocketSet_->add(h.socket_);
175 AsyncServerSocket::~AsyncServerSocket() {
176 assert(callbacks_.empty());
179 int AsyncServerSocket::stopAccepting(int shutdownFlags) {
181 for (auto& handler : sockets_) {
182 VLOG(10) << "AsyncServerSocket::stopAccepting " << this <<
185 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
187 // When destroy is called, unregister and close the socket immediately.
190 // Close the sockets in reverse order as they were opened to avoid
191 // the condition where another process concurrently tries to open
192 // the same port, succeed to bind the first socket but fails on the
193 // second because it hasn't been closed yet.
194 for (; !sockets_.empty(); sockets_.pop_back()) {
195 auto& handler = sockets_.back();
196 handler.unregisterHandler();
197 if (shutdownSocketSet_) {
198 shutdownSocketSet_->close(handler.socket_);
199 } else if (shutdownFlags >= 0) {
200 result = shutdownNoInt(handler.socket_, shutdownFlags);
201 pendingCloseSockets_.push_back(handler.socket_);
203 closeNoInt(handler.socket_);
207 // Destroy the backoff timout. This will cancel it if it is running.
208 delete backoffTimeout_;
209 backoffTimeout_ = nullptr;
211 // Close all of the callback queues to notify them that they are being
212 // destroyed. No one should access the AsyncServerSocket any more once
213 // destroy() is called. However, clear out callbacks_ before invoking the
214 // accept callbacks just in case. This will potentially help us detect the
215 // bug if one of the callbacks calls addAcceptCallback() or
216 // removeAcceptCallback().
217 std::vector<CallbackInfo> callbacksCopy;
218 callbacks_.swap(callbacksCopy);
219 for (std::vector<CallbackInfo>::iterator it = callbacksCopy.begin();
220 it != callbacksCopy.end();
222 it->consumer->stop(it->eventBase, it->callback);
228 void AsyncServerSocket::destroy() {
230 for (auto s : pendingCloseSockets_) {
233 // Then call DelayedDestruction::destroy() to take care of
234 // whether or not we need immediate or delayed destruction
235 DelayedDestruction::destroy();
238 void AsyncServerSocket::attachEventBase(EventBase *eventBase) {
239 assert(eventBase_ == nullptr);
240 assert(eventBase->isInEventBaseThread());
242 eventBase_ = eventBase;
243 for (auto& handler : sockets_) {
244 handler.attachEventBase(eventBase);
248 void AsyncServerSocket::detachEventBase() {
249 assert(eventBase_ != nullptr);
250 assert(eventBase_->isInEventBaseThread());
253 eventBase_ = nullptr;
254 for (auto& handler : sockets_) {
255 handler.detachEventBase();
259 void AsyncServerSocket::useExistingSockets(const std::vector<int>& fds) {
260 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
262 if (sockets_.size() > 0) {
263 throw std::invalid_argument(
264 "cannot call useExistingSocket() on a "
265 "AsyncServerSocket that already has a socket");
269 // Set addressFamily_ from this socket.
270 // Note that the socket may not have been bound yet, but
271 // setFromLocalAddress() will still work and get the correct address family.
272 // We will update addressFamily_ again anyway if bind() is called later.
273 SocketAddress address;
274 address.setFromLocalAddress(fd);
276 setupSocket(fd, address.getFamily());
277 sockets_.emplace_back(eventBase_, fd, this, address.getFamily());
278 sockets_.back().changeHandlerFD(fd);
282 void AsyncServerSocket::useExistingSocket(int fd) {
283 useExistingSockets({fd});
286 void AsyncServerSocket::bindSocket(
288 const SocketAddress& address,
289 bool isExistingSocket) {
290 sockaddr_storage addrStorage;
291 address.getAddress(&addrStorage);
292 sockaddr* saddr = reinterpret_cast<sockaddr*>(&addrStorage);
293 if (fsp::bind(fd, saddr, address.getActualSize()) != 0) {
294 if (!isExistingSocket) {
297 folly::throwSystemError(errno,
298 "failed to bind to async server socket: " +
302 // If we just created this socket, update the EventHandler and set socket_
303 if (!isExistingSocket) {
304 sockets_.emplace_back(eventBase_, fd, this, address.getFamily());
308 void AsyncServerSocket::bind(const SocketAddress& address) {
309 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
311 // useExistingSocket() may have been called to initialize socket_ already.
312 // However, in the normal case we need to create a new socket now.
313 // Don't set socket_ yet, so that socket_ will remain uninitialized if an
316 if (sockets_.size() == 0) {
317 fd = createSocket(address.getFamily());
318 } else if (sockets_.size() == 1) {
319 if (address.getFamily() != sockets_[0].addressFamily_) {
320 throw std::invalid_argument(
321 "Attempted to bind address to socket with "
322 "different address family");
324 fd = sockets_[0].socket_;
326 throw std::invalid_argument(
327 "Attempted to bind to multiple fds");
330 bindSocket(fd, address, !sockets_.empty());
333 void AsyncServerSocket::bind(
334 const std::vector<IPAddress>& ipAddresses,
336 if (ipAddresses.empty()) {
337 throw std::invalid_argument("No ip addresses were provided");
339 if (!sockets_.empty()) {
340 throw std::invalid_argument("Cannot call bind on a AsyncServerSocket "
341 "that already has a socket.");
344 for (const IPAddress& ipAddress : ipAddresses) {
345 SocketAddress address(ipAddress.toFullyQualified(), port);
346 int fd = createSocket(address.getFamily());
348 bindSocket(fd, address, false);
350 if (sockets_.size() == 0) {
351 throw std::runtime_error(
352 "did not bind any async server socket for port and addresses");
// Bind to the given port on all local addresses returned by getaddrinfo().
// IPv6 wildcard addresses are bound first (with IPV6_V6ONLY set so the
// IPv4 wildcard can also be bound).  For port == 0, the ephemeral port
// chosen for IPv6 is re-used for IPv4, retrying up to kNumTries times if
// that port is unavailable for IPv4.
void AsyncServerSocket::bind(uint16_t port) {
  struct addrinfo hints, *res, *res0;
  char sport[sizeof("65536")];

  memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_UNSPEC;
  hints.ai_socktype = SOCK_STREAM;
  hints.ai_flags = AI_PASSIVE;
  snprintf(sport, sizeof(sport), "%u", port);

  if (getaddrinfo(nullptr, sport, &hints, &res0)) {
    throw std::invalid_argument(
        "Attempted to bind address to socket with "
        "bad getaddrinfo");
  }

  SCOPE_EXIT { freeaddrinfo(res0); };

  auto setupAddress = [&] (struct addrinfo* res) {
    int s = fsp::socket(res->ai_family, res->ai_socktype, res->ai_protocol);
    // IPv6/IPv4 may not be supported by the kernel
    if (s < 0 && errno == EAFNOSUPPORT) {
      return;
    }
    CHECK_GE(s, 0);

    try {
      setupSocket(s, res->ai_family);
    } catch (...) {
      closeNoInt(s);
      throw;
    }

    if (res->ai_family == AF_INET6) {
      int v6only = 1;
      CHECK(0 == setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY,
                            &v6only, sizeof(v6only)));
    }

    SocketAddress address;
    address.setFromLocalAddress(s);

    sockets_.emplace_back(eventBase_, s, this, address.getFamily());

    // Bind to the socket
    if (fsp::bind(s, res->ai_addr, res->ai_addrlen) != 0) {
      folly::throwSystemError(
          errno,
          "failed to bind to async server socket for port ",
          SocketAddress::getPortFrom(res->ai_addr),
          " family ",
          SocketAddress::getFamilyNameFrom(res->ai_addr, "<unknown>"));
    }
  };

  const int kNumTries = 25;
  for (int tries = 1; true; tries++) {
    // Prefer AF_INET6 addresses. RFC 3484 mandates that getaddrinfo
    // should return IPv6 first and then IPv4 addresses, but glibc's
    // getaddrinfo(nullptr) with AI_PASSIVE returns:
    // - 0.0.0.0 (IPv4-only)
    // - :: (IPv6+IPv4) in this order
    // See: https://sourceware.org/bugzilla/show_bug.cgi?id=9981
    for (res = res0; res; res = res->ai_next) {
      if (res->ai_family == AF_INET6) {
        setupAddress(res);
      }
    }

    // If port == 0, then we should try to bind to the same port on ipv4 and
    // ipv6.  So if we did bind to ipv6, figure out that port and use it.
    if (sockets_.size() == 1 && port == 0) {
      SocketAddress address;
      address.setFromLocalAddress(sockets_.back().socket_);
      snprintf(sport, sizeof(sport), "%u", address.getPort());
      freeaddrinfo(res0);
      CHECK_EQ(0, getaddrinfo(nullptr, sport, &hints, &res0));
    }

    try {
      for (res = res0; res; res = res->ai_next) {
        if (res->ai_family != AF_INET6) {
          setupAddress(res);
        }
      }
    } catch (const std::system_error& e) {
      // If we can't bind to the same port on ipv4 as ipv6 when using
      // port=0 then we will retry again before giving up after
      // kNumTries attempts.  We do this by closing the sockets that
      // were opened, then restarting from scratch.
      if (port == 0 && !sockets_.empty() && tries != kNumTries) {
        for (const auto& socket : sockets_) {
          if (socket.socket_ <= 0) {
            continue;
          } else if (shutdownSocketSet_) {
            shutdownSocketSet_->close(socket.socket_);
          } else {
            closeNoInt(socket.socket_);
          }
        }
        sockets_.clear();
        snprintf(sport, sizeof(sport), "%u", port);
        freeaddrinfo(res0);
        CHECK_EQ(0, getaddrinfo(nullptr, sport, &hints, &res0));
        continue;
      }

      throw;
    }

    break;
  }

  if (sockets_.size() == 0) {
    throw std::runtime_error(
        "did not bind any async server socket for port");
  }
}
475 void AsyncServerSocket::listen(int backlog) {
476 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
479 for (auto& handler : sockets_) {
480 if (fsp::listen(handler.socket_, backlog) == -1) {
481 folly::throwSystemError(errno,
482 "failed to listen on async server socket");
487 void AsyncServerSocket::getAddress(SocketAddress* addressReturn) const {
488 CHECK(sockets_.size() >= 1);
489 VLOG_IF(2, sockets_.size() > 1)
490 << "Warning: getAddress() called and multiple addresses available ("
491 << sockets_.size() << "). Returning only the first one.";
493 addressReturn->setFromLocalAddress(sockets_[0].socket_);
496 std::vector<SocketAddress> AsyncServerSocket::getAddresses()
498 CHECK(sockets_.size() >= 1);
499 auto tsaVec = std::vector<SocketAddress>(sockets_.size());
500 auto tsaIter = tsaVec.begin();
501 for (const auto& socket : sockets_) {
502 (tsaIter++)->setFromLocalAddress(socket.socket_);
507 void AsyncServerSocket::addAcceptCallback(AcceptCallback *callback,
508 EventBase *eventBase,
509 uint32_t maxAtOnce) {
510 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
512 // If this is the first accept callback and we are supposed to be accepting,
513 // start accepting once the callback is installed.
514 bool runStartAccepting = accepting_ && callbacks_.empty();
517 eventBase = eventBase_; // Run in AsyncServerSocket's eventbase
520 callbacks_.emplace_back(callback, eventBase);
522 // Start the remote acceptor.
524 // It would be nice if we could avoid starting the remote acceptor if
525 // eventBase == eventBase_. However, that would cause issues if
526 // detachEventBase() and attachEventBase() were ever used to change the
527 // primary EventBase for the server socket. Therefore we require the caller
528 // to specify a nullptr EventBase if they want to ensure that the callback is
529 // always invoked in the primary EventBase, and to be able to invoke that
530 // callback more efficiently without having to use a notification queue.
531 RemoteAcceptor* acceptor = nullptr;
533 acceptor = new RemoteAcceptor(callback, connectionEventCallback_);
534 acceptor->start(eventBase, maxAtOnce, maxNumMsgsInQueue_);
536 callbacks_.pop_back();
540 callbacks_.back().consumer = acceptor;
542 // If this is the first accept callback and we are supposed to be accepting,
544 if (runStartAccepting) {
549 void AsyncServerSocket::removeAcceptCallback(AcceptCallback *callback,
550 EventBase *eventBase) {
551 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
553 // Find the matching AcceptCallback.
554 // We just do a simple linear search; we don't expect removeAcceptCallback()
555 // to be called frequently, and we expect there to only be a small number of
557 std::vector<CallbackInfo>::iterator it = callbacks_.begin();
560 if (it == callbacks_.end()) {
561 throw std::runtime_error("AsyncServerSocket::removeAcceptCallback(): "
562 "accept callback not found");
564 if (it->callback == callback &&
565 (it->eventBase == eventBase || eventBase == nullptr)) {
572 // Remove this callback from callbacks_.
574 // Do this before invoking the acceptStopped() callback, in case
575 // acceptStopped() invokes one of our methods that examines callbacks_.
577 // Save a copy of the CallbackInfo first.
578 CallbackInfo info(*it);
579 callbacks_.erase(it);
580 if (n < callbackIndex_) {
581 // We removed an element before callbackIndex_. Move callbackIndex_ back
582 // one step, since things after n have been shifted back by 1.
585 // We removed something at or after callbackIndex_.
586 // If we removed the last element and callbackIndex_ was pointing at it,
587 // we need to reset callbackIndex_ to 0.
588 if (callbackIndex_ >= callbacks_.size()) {
593 info.consumer->stop(info.eventBase, info.callback);
595 // If we are supposed to be accepting but the last accept callback
596 // was removed, unregister for events until a callback is added.
597 if (accepting_ && callbacks_.empty()) {
598 for (auto& handler : sockets_) {
599 handler.unregisterHandler();
604 void AsyncServerSocket::startAccepting() {
605 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
608 if (callbacks_.empty()) {
609 // We can't actually begin accepting if no callbacks are defined.
610 // Wait until a callback is added to start accepting.
614 for (auto& handler : sockets_) {
615 if (!handler.registerHandler(
616 EventHandler::READ | EventHandler::PERSIST)) {
617 throw std::runtime_error("failed to register for accept events");
622 void AsyncServerSocket::pauseAccepting() {
623 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
625 for (auto& handler : sockets_) {
626 handler. unregisterHandler();
629 // If we were in the accept backoff state, disable the backoff timeout
630 if (backoffTimeout_) {
631 backoffTimeout_->cancelTimeout();
635 int AsyncServerSocket::createSocket(int family) {
636 int fd = fsp::socket(family, SOCK_STREAM, 0);
638 folly::throwSystemError(errno, "error creating async server socket");
642 setupSocket(fd, family);
650 void AsyncServerSocket::setupSocket(int fd, int family) {
651 // Put the socket in non-blocking mode
652 if (fcntl(fd, F_SETFL, O_NONBLOCK) != 0) {
653 folly::throwSystemError(errno,
654 "failed to put socket in non-blocking mode");
657 // Set reuseaddr to avoid 2MSL delay on server restart
659 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) != 0) {
660 // This isn't a fatal error; just log an error message and continue
661 LOG(ERROR) << "failed to set SO_REUSEADDR on async server socket " << errno;
664 // Set reuseport to support multiple accept threads
666 if (reusePortEnabled_ &&
667 setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(int)) != 0) {
668 LOG(ERROR) << "failed to set SO_REUSEPORT on async server socket "
671 folly::throwSystemError(errno, "failed to bind to the async server socket");
673 SocketAddress address;
674 address.setFromLocalAddress(fd);
675 folly::throwSystemError(errno,
676 "failed to bind to async server socket: " +
681 // Set keepalive as desired
682 if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE,
683 (keepAliveEnabled_) ? &one : &zero, sizeof(int)) != 0) {
684 LOG(ERROR) << "failed to set SO_KEEPALIVE on async server socket: " <<
688 // Setup FD_CLOEXEC flag
690 (-1 == folly::setCloseOnExec(fd, closeOnExec_))) {
691 LOG(ERROR) << "failed to set FD_CLOEXEC on async server socket: " <<
695 // Set TCP nodelay if available, MAC OS X Hack
696 // See http://lists.danga.com/pipermail/memcached/2005-March/001240.html
698 if (family != AF_UNIX) {
699 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) != 0) {
700 // This isn't a fatal error; just log an error message and continue
701 LOG(ERROR) << "failed to set TCP_NODELAY on async server socket: " <<
708 if (tfo_ && detail::tfo_enable(fd, tfoMaxQueueSize_) != 0) {
709 // This isn't a fatal error; just log an error message and continue
710 LOG(WARNING) << "failed to set TCP_FASTOPEN on async server socket: "
711 << folly::errnoStr(errno);
715 if (shutdownSocketSet_) {
716 shutdownSocketSet_->add(fd);
720 void AsyncServerSocket::handlerReady(uint16_t /* events */,
722 sa_family_t addressFamily) noexcept {
723 assert(!callbacks_.empty());
724 DestructorGuard dg(this);
726 // Only accept up to maxAcceptAtOnce_ connections at a time,
727 // to avoid starving other I/O handlers using this EventBase.
728 for (uint32_t n = 0; n < maxAcceptAtOnce_; ++n) {
729 SocketAddress address;
731 sockaddr_storage addrStorage;
732 socklen_t addrLen = sizeof(addrStorage);
733 sockaddr* saddr = reinterpret_cast<sockaddr*>(&addrStorage);
735 // In some cases, accept() doesn't seem to update these correctly.
736 saddr->sa_family = addressFamily;
737 if (addressFamily == AF_UNIX) {
738 addrLen = sizeof(struct sockaddr_un);
741 // Accept a new client socket
743 int clientSocket = accept4(fd, saddr, &addrLen, SOCK_NONBLOCK);
745 int clientSocket = accept(fd, saddr, &addrLen);
748 address.setFromSockaddr(saddr, addrLen);
750 if (clientSocket >= 0 && connectionEventCallback_) {
751 connectionEventCallback_->onConnectionAccepted(clientSocket, address);
754 std::chrono::time_point<std::chrono::steady_clock> nowMs =
755 std::chrono::steady_clock::now();
756 auto timeSinceLastAccept = std::max<int64_t>(
758 nowMs.time_since_epoch().count() -
759 lastAccepTimestamp_.time_since_epoch().count());
760 lastAccepTimestamp_ = nowMs;
761 if (acceptRate_ < 1) {
762 acceptRate_ *= 1 + acceptRateAdjustSpeed_ * timeSinceLastAccept;
763 if (acceptRate_ >= 1) {
765 } else if (rand() > acceptRate_ * RAND_MAX) {
766 ++numDroppedConnections_;
767 if (clientSocket >= 0) {
768 closeNoInt(clientSocket);
769 if (connectionEventCallback_) {
770 connectionEventCallback_->onConnectionDropped(clientSocket,
778 if (clientSocket < 0) {
779 if (errno == EAGAIN) {
780 // No more sockets to accept right now.
781 // Check for this code first, since it's the most common.
783 } else if (errno == EMFILE || errno == ENFILE) {
784 // We're out of file descriptors. Perhaps we're accepting connections
785 // too quickly. Pause accepting briefly to back off and give the server
786 // a chance to recover.
787 LOG(ERROR) << "accept failed: out of file descriptors; entering accept "
791 // Dispatch the error message
792 dispatchError("accept() failed", errno);
794 dispatchError("accept() failed", errno);
796 if (connectionEventCallback_) {
797 connectionEventCallback_->onConnectionAcceptError(errno);
802 #ifndef SOCK_NONBLOCK
803 // Explicitly set the new connection to non-blocking mode
804 if (fcntl(clientSocket, F_SETFL, O_NONBLOCK) != 0) {
805 closeNoInt(clientSocket);
806 dispatchError("failed to set accepted socket to non-blocking mode",
808 if (connectionEventCallback_) {
809 connectionEventCallback_->onConnectionDropped(clientSocket, address);
815 // Inform the callback about the new connection
816 dispatchSocket(clientSocket, std::move(address));
818 // If we aren't accepting any more, break out of the loop
819 if (!accepting_ || callbacks_.empty()) {
825 void AsyncServerSocket::dispatchSocket(int socket,
826 SocketAddress&& address) {
827 uint32_t startingIndex = callbackIndex_;
829 // Short circuit if the callback is in the primary EventBase thread
831 CallbackInfo *info = nextCallback();
832 if (info->eventBase == nullptr) {
833 info->callback->connectionAccepted(socket, address);
837 const SocketAddress addr(address);
838 // Create a message to send over the notification queue
840 msg.type = MessageType::MSG_NEW_CONN;
841 msg.address = std::move(address);
844 // Loop until we find a free queue to write to
846 if (info->consumer->getQueue()->tryPutMessageNoThrow(std::move(msg))) {
847 if (connectionEventCallback_) {
848 connectionEventCallback_->onConnectionEnqueuedForAcceptorCallback(
856 // We couldn't add to queue. Fall through to below
858 ++numDroppedConnections_;
859 if (acceptRateAdjustSpeed_ > 0) {
860 // aggressively decrease accept rate when in trouble
861 static const double kAcceptRateDecreaseSpeed = 0.1;
862 acceptRate_ *= 1 - kAcceptRateDecreaseSpeed;
866 if (callbackIndex_ == startingIndex) {
867 // The notification queue was full
868 // We can't really do anything at this point other than close the socket.
870 // This should only happen if a user's service is behaving extremely
871 // badly and none of the EventBase threads are looping fast enough to
872 // process the incoming connections. If the service is overloaded, it
873 // should use pauseAccepting() to temporarily back off accepting new
874 // connections, before they reach the point where their threads can't
875 // even accept new messages.
876 LOG(ERROR) << "failed to dispatch newly accepted socket:"
877 << " all accept callback queues are full";
879 if (connectionEventCallback_) {
880 connectionEventCallback_->onConnectionDropped(socket, addr);
885 info = nextCallback();
889 void AsyncServerSocket::dispatchError(const char *msgstr, int errnoValue) {
890 uint32_t startingIndex = callbackIndex_;
891 CallbackInfo *info = nextCallback();
893 // Create a message to send over the notification queue
895 msg.type = MessageType::MSG_ERROR;
896 msg.err = errnoValue;
897 msg.msg = std::move(msgstr);
900 // Short circuit if the callback is in the primary EventBase thread
901 if (info->eventBase == nullptr) {
902 std::runtime_error ex(
903 std::string(msgstr) + folly::to<std::string>(errnoValue));
904 info->callback->acceptError(ex);
908 if (info->consumer->getQueue()->tryPutMessageNoThrow(std::move(msg))) {
911 // Fall through and try another callback
913 if (callbackIndex_ == startingIndex) {
914 // The notification queues for all of the callbacks were full.
915 // We can't really do anything at this point.
916 LOG(ERROR) << "failed to dispatch accept error: all accept callback "
917 "queues are full: error msg: " <<
918 msg.msg.c_str() << errnoValue;
921 info = nextCallback();
925 void AsyncServerSocket::enterBackoff() {
926 // If this is the first time we have entered the backoff state,
927 // allocate backoffTimeout_.
928 if (backoffTimeout_ == nullptr) {
930 backoffTimeout_ = new BackoffTimeout(this);
931 } catch (const std::bad_alloc& ex) {
932 // Man, we couldn't even allocate the timer to re-enable accepts.
933 // We must be in pretty bad shape. Don't pause accepting for now,
934 // since we won't be able to re-enable ourselves later.
935 LOG(ERROR) << "failed to allocate AsyncServerSocket backoff"
936 << " timer; unable to temporarly pause accepting";
937 if (connectionEventCallback_) {
938 connectionEventCallback_->onBackoffError();
944 // For now, we simply pause accepting for 1 second.
946 // We could add some smarter backoff calculation here in the future. (e.g.,
947 // start sleeping for longer if we keep hitting the backoff frequently.)
948 // Typically the user needs to figure out why the server is overloaded and
949 // fix it in some other way, though. The backoff timer is just a simple
950 // mechanism to try and give the connection processing code a little bit of
951 // breathing room to catch up, and to avoid just spinning and failing to
952 // accept over and over again.
953 const uint32_t timeoutMS = 1000;
954 if (!backoffTimeout_->scheduleTimeout(timeoutMS)) {
955 LOG(ERROR) << "failed to schedule AsyncServerSocket backoff timer;"
956 << "unable to temporarly pause accepting";
957 if (connectionEventCallback_) {
958 connectionEventCallback_->onBackoffError();
963 // The backoff timer is scheduled to re-enable accepts.
964 // Go ahead and disable accepts for now. We leave accepting_ set to true,
965 // since that tracks the desired state requested by the user.
966 for (auto& handler : sockets_) {
967 handler.unregisterHandler();
969 if (connectionEventCallback_) {
970 connectionEventCallback_->onBackoffStarted();
974 void AsyncServerSocket::backoffTimeoutExpired() {
975 // accepting_ should still be true.
976 // If pauseAccepting() was called while in the backoff state it will cancel
977 // the backoff timeout.
979 // We can't be detached from the EventBase without being paused
980 assert(eventBase_ != nullptr && eventBase_->isInEventBaseThread());
982 // If all of the callbacks were removed, we shouldn't re-enable accepts
983 if (callbacks_.empty()) {
984 if (connectionEventCallback_) {
985 connectionEventCallback_->onBackoffEnded();
990 // Register the handler.
991 for (auto& handler : sockets_) {
992 if (!handler.registerHandler(
993 EventHandler::READ | EventHandler::PERSIST)) {
994 // We're hosed. We could just re-schedule backoffTimeout_ to
995 // re-try again after a little bit. However, we don't want to
996 // loop retrying forever if we can't re-enable accepts. Just
997 // abort the entire program in this state; things are really bad
998 // and restarting the entire server is probably the best remedy.
1000 << "failed to re-enable AsyncServerSocket accepts after backoff; "
1005 if (connectionEventCallback_) {
1006 connectionEventCallback_->onBackoffEnded();