/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include <folly/io/async/AsyncSocket.h>
19 #include <folly/ExceptionWrapper.h>
20 #include <folly/Format.h>
21 #include <folly/Portability.h>
22 #include <folly/SocketAddress.h>
23 #include <folly/io/Cursor.h>
24 #include <folly/io/IOBuf.h>
25 #include <folly/io/IOBufQueue.h>
26 #include <folly/portability/Fcntl.h>
27 #include <folly/portability/Sockets.h>
28 #include <folly/portability/SysUio.h>
29 #include <folly/portability/Unistd.h>
31 #include <boost/preprocessor/control/if.hpp>
34 #include <sys/types.h>
38 using std::unique_ptr;
40 namespace fsp = folly::portability::sockets;
44 static constexpr bool msgErrQueueSupported =
45 #ifdef FOLLY_HAVE_MSG_ERRQUEUE
49 #endif // FOLLY_HAVE_MSG_ERRQUEUE
// static members initializers
// Empty option map used as the default `options` argument of connect().
const AsyncSocket::OptionMap AsyncSocket::emptyOptionMap;

// Pre-constructed exceptions for the common local-teardown cases.
// NOTE(review): their use sites are not in this chunk — presumably the
// close()/shutdown paths hand these to pending callbacks; verify in the
// rest of the file.
const AsyncSocketException socketClosedLocallyEx(
    AsyncSocketException::END_OF_FILE, "socket closed locally");
const AsyncSocketException socketShutdownForWritesEx(
    AsyncSocketException::END_OF_FILE, "socket shutdown for writes");
// TODO: It might help performance to provide a version of BytesWriteRequest
// that users could derive from, so we can avoid the extra allocation for each
// call to write()/writev().  We could templatize TFramedAsyncChannel just
// like the protocols are currently templatized for transports.
//
// We would need the version for external users where they provide the iovec
// storage space, and only our internal version would allocate it at the end
// of the request object itself.
68 /* The default WriteRequest implementation, used for write(), writev() and
71 * A new BytesWriteRequest operation is allocated on the heap for all write
72 * operations that cannot be completed immediately.
74 class AsyncSocket::BytesWriteRequest : public AsyncSocket::WriteRequest {
76 static BytesWriteRequest* newRequest(AsyncSocket* socket,
77 WriteCallback* callback,
80 uint32_t partialWritten,
81 uint32_t bytesWritten,
82 unique_ptr<IOBuf>&& ioBuf,
85 // Since we put a variable size iovec array at the end
86 // of each BytesWriteRequest, we have to manually allocate the memory.
87 void* buf = malloc(sizeof(BytesWriteRequest) +
88 (opCount * sizeof(struct iovec)));
90 throw std::bad_alloc();
93 return new(buf) BytesWriteRequest(socket, callback, ops, opCount,
94 partialWritten, bytesWritten,
95 std::move(ioBuf), flags);
98 void destroy() override {
99 this->~BytesWriteRequest();
103 WriteResult performWrite() override {
104 WriteFlags writeFlags = flags_;
105 if (getNext() != nullptr) {
106 writeFlags |= WriteFlags::CORK;
109 socket_->adjustZeroCopyFlags(writeFlags);
111 auto writeResult = socket_->performWrite(
112 getOps(), getOpCount(), writeFlags, &opsWritten_, &partialBytes_);
113 bytesWritten_ = writeResult.writeReturn > 0 ? writeResult.writeReturn : 0;
115 if (socket_->isZeroCopyRequest(writeFlags)) {
117 socket_->addZeroCopyBuf(std::move(ioBuf_));
119 socket_->addZeroCopyBuf(ioBuf_.get());
122 // this happens if at least one of the prev requests were sent
123 // with zero copy but not the last one
124 if (isComplete() && socket_->getZeroCopy() &&
125 socket_->containsZeroCopyBuf(ioBuf_.get())) {
126 socket_->setZeroCopyBuf(std::move(ioBuf_));
133 bool isComplete() override {
134 return opsWritten_ == getOpCount();
137 void consume() override {
138 // Advance opIndex_ forward by opsWritten_
139 opIndex_ += opsWritten_;
140 assert(opIndex_ < opCount_);
142 if (!socket_->isZeroCopyRequest(flags_)) {
143 // If we've finished writing any IOBufs, release them
145 for (uint32_t i = opsWritten_; i != 0; --i) {
147 ioBuf_ = ioBuf_->pop();
152 // Move partialBytes_ forward into the current iovec buffer
153 struct iovec* currentOp = writeOps_ + opIndex_;
154 assert((partialBytes_ < currentOp->iov_len) || (currentOp->iov_len == 0));
155 currentOp->iov_base =
156 reinterpret_cast<uint8_t*>(currentOp->iov_base) + partialBytes_;
157 currentOp->iov_len -= partialBytes_;
159 // Increment the totalBytesWritten_ count by bytesWritten_;
160 assert(bytesWritten_ >= 0);
161 totalBytesWritten_ += uint32_t(bytesWritten_);
165 BytesWriteRequest(AsyncSocket* socket,
166 WriteCallback* callback,
167 const struct iovec* ops,
169 uint32_t partialBytes,
170 uint32_t bytesWritten,
171 unique_ptr<IOBuf>&& ioBuf,
173 : AsyncSocket::WriteRequest(socket, callback)
177 , ioBuf_(std::move(ioBuf))
179 , partialBytes_(partialBytes)
180 , bytesWritten_(bytesWritten) {
181 memcpy(writeOps_, ops, sizeof(*ops) * opCount_);
184 // private destructor, to ensure callers use destroy()
185 ~BytesWriteRequest() override = default;
187 const struct iovec* getOps() const {
188 assert(opCount_ > opIndex_);
189 return writeOps_ + opIndex_;
192 uint32_t getOpCount() const {
193 assert(opCount_ > opIndex_);
194 return opCount_ - opIndex_;
197 uint32_t opCount_; ///< number of entries in writeOps_
198 uint32_t opIndex_; ///< current index into writeOps_
199 WriteFlags flags_; ///< set for WriteFlags
200 unique_ptr<IOBuf> ioBuf_; ///< underlying IOBuf, or nullptr if N/A
202 // for consume(), how much we wrote on the last write
203 uint32_t opsWritten_; ///< complete ops written
204 uint32_t partialBytes_; ///< partial bytes of incomplete op written
205 ssize_t bytesWritten_; ///< bytes written altogether
207 struct iovec writeOps_[]; ///< write operation(s) list
210 int AsyncSocket::SendMsgParamsCallback::getDefaultFlags(
211 folly::WriteFlags flags,
212 bool zeroCopyEnabled) noexcept {
213 int msg_flags = MSG_DONTWAIT;
215 #ifdef MSG_NOSIGNAL // Linux-only
216 msg_flags |= MSG_NOSIGNAL;
218 if (isSet(flags, WriteFlags::CORK)) {
219 // MSG_MORE tells the kernel we have more data to send, so wait for us to
220 // give it the rest of the data rather than immediately sending a partial
221 // frame, even when TCP_NODELAY is enabled.
222 msg_flags |= MSG_MORE;
225 #endif // MSG_NOSIGNAL
226 if (isSet(flags, WriteFlags::EOR)) {
227 // marks that this is the last byte of a record (response)
228 msg_flags |= MSG_EOR;
231 if (zeroCopyEnabled && isSet(flags, WriteFlags::WRITE_MSG_ZEROCOPY)) {
232 msg_flags |= MSG_ZEROCOPY;
239 static AsyncSocket::SendMsgParamsCallback defaultSendMsgParamsCallback;
242 AsyncSocket::AsyncSocket()
243 : eventBase_(nullptr),
244 writeTimeout_(this, nullptr),
245 ioHandler_(this, nullptr),
246 immediateReadHandler_(this) {
247 VLOG(5) << "new AsyncSocket()";
251 AsyncSocket::AsyncSocket(EventBase* evb)
253 writeTimeout_(this, evb),
254 ioHandler_(this, evb),
255 immediateReadHandler_(this) {
256 VLOG(5) << "new AsyncSocket(" << this << ", evb=" << evb << ")";
260 AsyncSocket::AsyncSocket(EventBase* evb,
261 const folly::SocketAddress& address,
262 uint32_t connectTimeout)
264 connect(nullptr, address, connectTimeout);
267 AsyncSocket::AsyncSocket(EventBase* evb,
268 const std::string& ip,
270 uint32_t connectTimeout)
272 connect(nullptr, ip, port, connectTimeout);
275 AsyncSocket::AsyncSocket(EventBase* evb, int fd, uint32_t zeroCopyBufId)
276 : zeroCopyBufId_(zeroCopyBufId),
278 writeTimeout_(this, evb),
279 ioHandler_(this, evb, fd),
280 immediateReadHandler_(this) {
281 VLOG(5) << "new AsyncSocket(" << this << ", evb=" << evb << ", fd=" << fd
282 << ", zeroCopyBufId=" << zeroCopyBufId << ")";
286 state_ = StateEnum::ESTABLISHED;
289 AsyncSocket::AsyncSocket(AsyncSocket::UniquePtr oldAsyncSocket)
291 oldAsyncSocket->getEventBase(),
292 oldAsyncSocket->detachFd(),
293 oldAsyncSocket->getZeroCopyBufId()) {
294 preReceivedData_ = std::move(oldAsyncSocket->preReceivedData_);
297 // init() method, since constructor forwarding isn't supported in most
299 void AsyncSocket::init() {
301 eventBase_->dcheckIsInEventBaseThread();
304 state_ = StateEnum::UNINIT;
305 eventFlags_ = EventHandler::NONE;
308 maxReadsPerEvent_ = 16;
309 connectCallback_ = nullptr;
310 errMessageCallback_ = nullptr;
311 readCallback_ = nullptr;
312 writeReqHead_ = nullptr;
313 writeReqTail_ = nullptr;
314 wShutdownSocketSet_.reset();
315 appBytesWritten_ = 0;
316 appBytesReceived_ = 0;
317 sendMsgParamCallback_ = &defaultSendMsgParamsCallback;
320 AsyncSocket::~AsyncSocket() {
321 VLOG(7) << "actual destruction of AsyncSocket(this=" << this
322 << ", evb=" << eventBase_ << ", fd=" << fd_
323 << ", state=" << state_ << ")";
326 void AsyncSocket::destroy() {
327 VLOG(5) << "AsyncSocket::destroy(this=" << this << ", evb=" << eventBase_
328 << ", fd=" << fd_ << ", state=" << state_;
329 // When destroy is called, close the socket immediately
332 // Then call DelayedDestruction::destroy() to take care of
333 // whether or not we need immediate or delayed destruction
334 DelayedDestruction::destroy();
337 int AsyncSocket::detachFd() {
338 VLOG(6) << "AsyncSocket::detachFd(this=" << this << ", fd=" << fd_
339 << ", evb=" << eventBase_ << ", state=" << state_
340 << ", events=" << std::hex << eventFlags_ << ")";
341 // Extract the fd, and set fd_ to -1 first, so closeNow() won't
342 // actually close the descriptor.
343 if (const auto socketSet = wShutdownSocketSet_.lock()) {
344 socketSet->remove(fd_);
348 // Call closeNow() to invoke all pending callbacks with an error.
350 // Update the EventHandler to stop using this fd.
351 // This can only be done after closeNow() unregisters the handler.
352 ioHandler_.changeHandlerFD(-1);
356 const folly::SocketAddress& AsyncSocket::anyAddress() {
357 static const folly::SocketAddress anyAddress =
358 folly::SocketAddress("0.0.0.0", 0);
362 void AsyncSocket::setShutdownSocketSet(
363 const std::weak_ptr<ShutdownSocketSet>& wNewSS) {
364 const auto newSS = wNewSS.lock();
365 const auto shutdownSocketSet = wShutdownSocketSet_.lock();
367 if (newSS == shutdownSocketSet) {
371 if (shutdownSocketSet && fd_ != -1) {
372 shutdownSocketSet->remove(fd_);
375 if (newSS && fd_ != -1) {
379 wShutdownSocketSet_ = wNewSS;
382 void AsyncSocket::setCloseOnExec() {
383 int rv = fcntl(fd_, F_SETFD, FD_CLOEXEC);
385 auto errnoCopy = errno;
386 throw AsyncSocketException(
387 AsyncSocketException::INTERNAL_ERROR,
388 withAddr("failed to set close-on-exec flag"),
393 void AsyncSocket::connect(ConnectCallback* callback,
394 const folly::SocketAddress& address,
396 const OptionMap &options,
397 const folly::SocketAddress& bindAddr) noexcept {
398 DestructorGuard dg(this);
399 eventBase_->dcheckIsInEventBaseThread();
403 // Make sure we're in the uninitialized state
404 if (state_ != StateEnum::UNINIT) {
405 return invalidState(callback);
408 connectTimeout_ = std::chrono::milliseconds(timeout);
409 connectStartTime_ = std::chrono::steady_clock::now();
410 // Make connect end time at least >= connectStartTime.
411 connectEndTime_ = connectStartTime_;
414 state_ = StateEnum::CONNECTING;
415 connectCallback_ = callback;
417 sockaddr_storage addrStorage;
418 sockaddr* saddr = reinterpret_cast<sockaddr*>(&addrStorage);
422 // Technically the first parameter should actually be a protocol family
423 // constant (PF_xxx) rather than an address family (AF_xxx), but the
424 // distinction is mainly just historical. In pretty much all
425 // implementations the PF_foo and AF_foo constants are identical.
426 fd_ = fsp::socket(address.getFamily(), SOCK_STREAM, 0);
428 auto errnoCopy = errno;
429 throw AsyncSocketException(
430 AsyncSocketException::INTERNAL_ERROR,
431 withAddr("failed to create socket"),
434 if (const auto shutdownSocketSet = wShutdownSocketSet_.lock()) {
435 shutdownSocketSet->add(fd_);
437 ioHandler_.changeHandlerFD(fd_);
441 // Put the socket in non-blocking mode
442 int flags = fcntl(fd_, F_GETFL, 0);
444 auto errnoCopy = errno;
445 throw AsyncSocketException(
446 AsyncSocketException::INTERNAL_ERROR,
447 withAddr("failed to get socket flags"),
450 int rv = fcntl(fd_, F_SETFL, flags | O_NONBLOCK);
452 auto errnoCopy = errno;
453 throw AsyncSocketException(
454 AsyncSocketException::INTERNAL_ERROR,
455 withAddr("failed to put socket in non-blocking mode"),
459 #if !defined(MSG_NOSIGNAL) && defined(F_SETNOSIGPIPE)
460 // iOS and OS X don't support MSG_NOSIGNAL; set F_SETNOSIGPIPE instead
461 rv = fcntl(fd_, F_SETNOSIGPIPE, 1);
463 auto errnoCopy = errno;
464 throw AsyncSocketException(
465 AsyncSocketException::INTERNAL_ERROR,
466 "failed to enable F_SETNOSIGPIPE on socket",
471 // By default, turn on TCP_NODELAY
472 // If setNoDelay() fails, we continue anyway; this isn't a fatal error.
473 // setNoDelay() will log an error message if it fails.
474 // Also set the cached zeroCopyVal_ since it cannot be set earlier if the fd
476 if (address.getFamily() != AF_UNIX) {
477 (void)setNoDelay(true);
478 setZeroCopy(zeroCopyVal_);
481 VLOG(5) << "AsyncSocket::connect(this=" << this << ", evb=" << eventBase_
482 << ", fd=" << fd_ << ", host=" << address.describe().c_str();
485 if (bindAddr != anyAddress()) {
487 if (setsockopt(fd_, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one))) {
488 auto errnoCopy = errno;
490 throw AsyncSocketException(
491 AsyncSocketException::NOT_OPEN,
492 "failed to setsockopt prior to bind on " + bindAddr.describe(),
496 bindAddr.getAddress(&addrStorage);
498 if (bind(fd_, saddr, bindAddr.getActualSize()) != 0) {
499 auto errnoCopy = errno;
501 throw AsyncSocketException(
502 AsyncSocketException::NOT_OPEN,
503 "failed to bind to async socket: " + bindAddr.describe(),
508 // Apply the additional options if any.
509 for (const auto& opt: options) {
510 rv = opt.first.apply(fd_, opt.second);
512 auto errnoCopy = errno;
513 throw AsyncSocketException(
514 AsyncSocketException::INTERNAL_ERROR,
515 withAddr("failed to set socket option"),
520 // Perform the connect()
521 address.getAddress(&addrStorage);
524 state_ = StateEnum::FAST_OPEN;
525 tfoAttempted_ = true;
527 if (socketConnect(saddr, addr_.getActualSize()) < 0) {
532 // If we're still here the connect() succeeded immediately.
533 // Fall through to call the callback outside of this try...catch block
534 } catch (const AsyncSocketException& ex) {
535 return failConnect(__func__, ex);
536 } catch (const std::exception& ex) {
537 // shouldn't happen, but handle it just in case
538 VLOG(4) << "AsyncSocket::connect(this=" << this << ", fd=" << fd_
539 << "): unexpected " << typeid(ex).name() << " exception: "
541 AsyncSocketException tex(AsyncSocketException::INTERNAL_ERROR,
542 withAddr(string("unexpected exception: ") +
544 return failConnect(__func__, tex);
547 // The connection succeeded immediately
548 // The read callback may not have been set yet, and no writes may be pending
549 // yet, so we don't have to register for any events at the moment.
550 VLOG(8) << "AsyncSocket::connect succeeded immediately; this=" << this;
551 assert(errMessageCallback_ == nullptr);
552 assert(readCallback_ == nullptr);
553 assert(writeReqHead_ == nullptr);
554 if (state_ != StateEnum::FAST_OPEN) {
555 state_ = StateEnum::ESTABLISHED;
557 invokeConnectSuccess();
560 int AsyncSocket::socketConnect(const struct sockaddr* saddr, socklen_t len) {
562 if (noTransparentTls_) {
563 // Ignore return value, errors are ok
564 setsockopt(fd_, SOL_SOCKET, SO_NO_TRANSPARENT_TLS, nullptr, 0);
567 VLOG(4) << "Disabling TSOCKS for fd " << fd_;
568 // Ignore return value, errors are ok
569 setsockopt(fd_, SOL_SOCKET, SO_NO_TSOCKS, nullptr, 0);
572 int rv = fsp::connect(fd_, saddr, len);
574 auto errnoCopy = errno;
575 if (errnoCopy == EINPROGRESS) {
576 scheduleConnectTimeout();
577 registerForConnectEvents();
579 throw AsyncSocketException(
580 AsyncSocketException::NOT_OPEN,
581 "connect failed (immediately)",
588 void AsyncSocket::scheduleConnectTimeout() {
589 // Connection in progress.
590 auto timeout = connectTimeout_.count();
592 // Start a timer in case the connection takes too long.
593 if (!writeTimeout_.scheduleTimeout(uint32_t(timeout))) {
594 throw AsyncSocketException(
595 AsyncSocketException::INTERNAL_ERROR,
596 withAddr("failed to schedule AsyncSocket connect timeout"));
601 void AsyncSocket::registerForConnectEvents() {
602 // Register for write events, so we'll
603 // be notified when the connection finishes/fails.
604 // Note that we don't register for a persistent event here.
605 assert(eventFlags_ == EventHandler::NONE);
606 eventFlags_ = EventHandler::WRITE;
607 if (!ioHandler_.registerHandler(eventFlags_)) {
608 throw AsyncSocketException(
609 AsyncSocketException::INTERNAL_ERROR,
610 withAddr("failed to register AsyncSocket connect handler"));
614 void AsyncSocket::connect(ConnectCallback* callback,
615 const string& ip, uint16_t port,
617 const OptionMap &options) noexcept {
618 DestructorGuard dg(this);
620 connectCallback_ = callback;
621 connect(callback, folly::SocketAddress(ip, port), timeout, options);
622 } catch (const std::exception& ex) {
623 AsyncSocketException tex(AsyncSocketException::INTERNAL_ERROR,
625 return failConnect(__func__, tex);
629 void AsyncSocket::cancelConnect() {
630 connectCallback_ = nullptr;
631 if (state_ == StateEnum::CONNECTING || state_ == StateEnum::FAST_OPEN) {
636 void AsyncSocket::setSendTimeout(uint32_t milliseconds) {
637 sendTimeout_ = milliseconds;
639 eventBase_->dcheckIsInEventBaseThread();
642 // If we are currently pending on write requests, immediately update
643 // writeTimeout_ with the new value.
644 if ((eventFlags_ & EventHandler::WRITE) &&
645 (state_ != StateEnum::CONNECTING && state_ != StateEnum::FAST_OPEN)) {
646 assert(state_ == StateEnum::ESTABLISHED);
647 assert((shutdownFlags_ & SHUT_WRITE) == 0);
648 if (sendTimeout_ > 0) {
649 if (!writeTimeout_.scheduleTimeout(sendTimeout_)) {
650 AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
651 withAddr("failed to reschedule send timeout in setSendTimeout"));
652 return failWrite(__func__, ex);
655 writeTimeout_.cancelTimeout();
660 void AsyncSocket::setErrMessageCB(ErrMessageCallback* callback) {
661 VLOG(6) << "AsyncSocket::setErrMessageCB() this=" << this
662 << ", fd=" << fd_ << ", callback=" << callback
663 << ", state=" << state_;
665 // Short circuit if callback is the same as the existing errMessageCallback_.
666 if (callback == errMessageCallback_) {
670 if (!msgErrQueueSupported) {
671 // Per-socket error message queue is not supported on this platform.
672 return invalidState(callback);
675 DestructorGuard dg(this);
676 eventBase_->dcheckIsInEventBaseThread();
678 if (callback == nullptr) {
679 // We should be able to reset the callback regardless of the
680 // socket state. It's important to have a reliable callback
681 // cancellation mechanism.
682 errMessageCallback_ = callback;
686 switch ((StateEnum)state_) {
687 case StateEnum::CONNECTING:
688 case StateEnum::FAST_OPEN:
689 case StateEnum::ESTABLISHED: {
690 errMessageCallback_ = callback;
693 case StateEnum::CLOSED:
694 case StateEnum::ERROR:
695 // We should never reach here. SHUT_READ should always be set
696 // if we are in STATE_CLOSED or STATE_ERROR.
698 return invalidState(callback);
699 case StateEnum::UNINIT:
700 // We do not allow setReadCallback() to be called before we start
702 return invalidState(callback);
705 // We don't put a default case in the switch statement, so that the compiler
706 // will warn us to update the switch statement if a new state is added.
707 return invalidState(callback);
710 AsyncSocket::ErrMessageCallback* AsyncSocket::getErrMessageCallback() const {
711 return errMessageCallback_;
714 void AsyncSocket::setSendMsgParamCB(SendMsgParamsCallback* callback) {
715 sendMsgParamCallback_ = callback;
718 AsyncSocket::SendMsgParamsCallback* AsyncSocket::getSendMsgParamsCB() const {
719 return sendMsgParamCallback_;
722 void AsyncSocket::setReadCB(ReadCallback *callback) {
723 VLOG(6) << "AsyncSocket::setReadCallback() this=" << this << ", fd=" << fd_
724 << ", callback=" << callback << ", state=" << state_;
726 // Short circuit if callback is the same as the existing readCallback_.
728 // Note that this is needed for proper functioning during some cleanup cases.
729 // During cleanup we allow setReadCallback(nullptr) to be called even if the
730 // read callback is already unset and we have been detached from an event
731 // base. This check prevents us from asserting
732 // eventBase_->isInEventBaseThread() when eventBase_ is nullptr.
733 if (callback == readCallback_) {
737 /* We are removing a read callback */
738 if (callback == nullptr &&
739 immediateReadHandler_.isLoopCallbackScheduled()) {
740 immediateReadHandler_.cancelLoopCallback();
743 if (shutdownFlags_ & SHUT_READ) {
744 // Reads have already been shut down on this socket.
746 // Allow setReadCallback(nullptr) to be called in this case, but don't
747 // allow a new callback to be set.
749 // For example, setReadCallback(nullptr) can happen after an error if we
750 // invoke some other error callback before invoking readError(). The other
751 // error callback that is invoked first may go ahead and clear the read
752 // callback before we get a chance to invoke readError().
753 if (callback != nullptr) {
754 return invalidState(callback);
756 assert((eventFlags_ & EventHandler::READ) == 0);
757 readCallback_ = nullptr;
761 DestructorGuard dg(this);
762 eventBase_->dcheckIsInEventBaseThread();
764 switch ((StateEnum)state_) {
765 case StateEnum::CONNECTING:
766 case StateEnum::FAST_OPEN:
767 // For convenience, we allow the read callback to be set while we are
768 // still connecting. We just store the callback for now. Once the
769 // connection completes we'll register for read events.
770 readCallback_ = callback;
772 case StateEnum::ESTABLISHED:
774 readCallback_ = callback;
775 uint16_t oldFlags = eventFlags_;
777 eventFlags_ |= EventHandler::READ;
779 eventFlags_ &= ~EventHandler::READ;
782 // Update our registration if our flags have changed
783 if (eventFlags_ != oldFlags) {
784 // We intentionally ignore the return value here.
785 // updateEventRegistration() will move us into the error state if it
786 // fails, and we don't need to do anything else here afterwards.
787 (void)updateEventRegistration();
791 checkForImmediateRead();
795 case StateEnum::CLOSED:
796 case StateEnum::ERROR:
797 // We should never reach here. SHUT_READ should always be set
798 // if we are in STATE_CLOSED or STATE_ERROR.
800 return invalidState(callback);
801 case StateEnum::UNINIT:
802 // We do not allow setReadCallback() to be called before we start
804 return invalidState(callback);
807 // We don't put a default case in the switch statement, so that the compiler
808 // will warn us to update the switch statement if a new state is added.
809 return invalidState(callback);
812 AsyncSocket::ReadCallback* AsyncSocket::getReadCallback() const {
813 return readCallback_;
816 bool AsyncSocket::setZeroCopy(bool enable) {
817 if (msgErrQueueSupported) {
818 zeroCopyVal_ = enable;
824 int val = enable ? 1 : 0;
825 int ret = setsockopt(fd_, SOL_SOCKET, SO_ZEROCOPY, &val, sizeof(val));
827 // if enable == false, set zeroCopyEnabled_ = false regardless
828 // if SO_ZEROCOPY is set or not
830 zeroCopyEnabled_ = enable;
834 /* if the setsockopt failed, try to see if the socket inherited the flag
835 * since we cannot set SO_ZEROCOPY on a socket s = accept
839 socklen_t optlen = sizeof(val);
840 ret = getsockopt(fd_, SOL_SOCKET, SO_ZEROCOPY, &val, &optlen);
843 enable = val ? true : false;
848 zeroCopyEnabled_ = enable;
857 bool AsyncSocket::isZeroCopyRequest(WriteFlags flags) {
858 return (zeroCopyEnabled_ && isSet(flags, WriteFlags::WRITE_MSG_ZEROCOPY));
861 void AsyncSocket::adjustZeroCopyFlags(folly::WriteFlags& flags) {
862 if (!zeroCopyEnabled_) {
863 flags = unSet(flags, folly::WriteFlags::WRITE_MSG_ZEROCOPY);
867 void AsyncSocket::addZeroCopyBuf(std::unique_ptr<folly::IOBuf>&& buf) {
868 uint32_t id = getNextZeroCopyBufId();
869 folly::IOBuf* ptr = buf.get();
871 idZeroCopyBufPtrMap_[id] = ptr;
872 auto& p = idZeroCopyBufInfoMap_[ptr];
874 CHECK(p.buf_.get() == nullptr);
875 p.buf_ = std::move(buf);
878 void AsyncSocket::addZeroCopyBuf(folly::IOBuf* ptr) {
879 uint32_t id = getNextZeroCopyBufId();
880 idZeroCopyBufPtrMap_[id] = ptr;
882 idZeroCopyBufInfoMap_[ptr].count_++;
885 void AsyncSocket::releaseZeroCopyBuf(uint32_t id) {
886 auto iter = idZeroCopyBufPtrMap_.find(id);
887 CHECK(iter != idZeroCopyBufPtrMap_.end());
888 auto ptr = iter->second;
889 auto iter1 = idZeroCopyBufInfoMap_.find(ptr);
890 CHECK(iter1 != idZeroCopyBufInfoMap_.end());
891 if (0 == --iter1->second.count_) {
892 idZeroCopyBufInfoMap_.erase(iter1);
896 void AsyncSocket::setZeroCopyBuf(std::unique_ptr<folly::IOBuf>&& buf) {
897 folly::IOBuf* ptr = buf.get();
898 auto& p = idZeroCopyBufInfoMap_[ptr];
899 CHECK(p.buf_.get() == nullptr);
901 p.buf_ = std::move(buf);
904 bool AsyncSocket::containsZeroCopyBuf(folly::IOBuf* ptr) {
905 return (idZeroCopyBufInfoMap_.find(ptr) != idZeroCopyBufInfoMap_.end());
908 bool AsyncSocket::isZeroCopyMsg(const cmsghdr& cmsg) const {
909 #ifdef FOLLY_HAVE_MSG_ERRQUEUE
910 if (zeroCopyEnabled_ &&
911 ((cmsg.cmsg_level == SOL_IP && cmsg.cmsg_type == IP_RECVERR) ||
912 (cmsg.cmsg_level == SOL_IPV6 && cmsg.cmsg_type == IPV6_RECVERR))) {
913 const struct sock_extended_err* serr =
914 reinterpret_cast<const struct sock_extended_err*>(CMSG_DATA(&cmsg));
916 (serr->ee_errno == 0) && (serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY));
922 void AsyncSocket::processZeroCopyMsg(const cmsghdr& cmsg) {
923 #ifdef FOLLY_HAVE_MSG_ERRQUEUE
924 const struct sock_extended_err* serr =
925 reinterpret_cast<const struct sock_extended_err*>(CMSG_DATA(&cmsg));
926 uint32_t hi = serr->ee_data;
927 uint32_t lo = serr->ee_info;
928 // disable zero copy if the buffer was actually copied
929 if ((serr->ee_code & SO_EE_CODE_ZEROCOPY_COPIED) && zeroCopyEnabled_) {
930 VLOG(2) << "AsyncSocket::processZeroCopyMsg(): setting "
931 << "zeroCopyEnabled_ = false due to SO_EE_CODE_ZEROCOPY_COPIED "
933 zeroCopyEnabled_ = false;
936 for (uint32_t i = lo; i <= hi; i++) {
937 releaseZeroCopyBuf(i);
942 void AsyncSocket::write(WriteCallback* callback,
943 const void* buf, size_t bytes, WriteFlags flags) {
945 op.iov_base = const_cast<void*>(buf);
947 writeImpl(callback, &op, 1, unique_ptr<IOBuf>(), flags);
950 void AsyncSocket::writev(WriteCallback* callback,
954 writeImpl(callback, vec, count, unique_ptr<IOBuf>(), flags);
957 void AsyncSocket::writeChain(WriteCallback* callback, unique_ptr<IOBuf>&& buf,
959 adjustZeroCopyFlags(flags);
961 constexpr size_t kSmallSizeMax = 64;
962 size_t count = buf->countChainElements();
963 if (count <= kSmallSizeMax) {
964 // suppress "warning: variable length array 'vec' is used [-Wvla]"
966 FOLLY_GCC_DISABLE_WARNING("-Wvla")
967 iovec vec[BOOST_PP_IF(FOLLY_HAVE_VLA, count, kSmallSizeMax)];
970 writeChainImpl(callback, vec, count, std::move(buf), flags);
972 iovec* vec = new iovec[count];
973 writeChainImpl(callback, vec, count, std::move(buf), flags);
978 void AsyncSocket::writeChainImpl(WriteCallback* callback, iovec* vec,
979 size_t count, unique_ptr<IOBuf>&& buf, WriteFlags flags) {
980 size_t veclen = buf->fillIov(vec, count);
981 writeImpl(callback, vec, veclen, std::move(buf), flags);
984 void AsyncSocket::writeImpl(WriteCallback* callback, const iovec* vec,
985 size_t count, unique_ptr<IOBuf>&& buf,
987 VLOG(6) << "AsyncSocket::writev() this=" << this << ", fd=" << fd_
988 << ", callback=" << callback << ", count=" << count
989 << ", state=" << state_;
990 DestructorGuard dg(this);
991 unique_ptr<IOBuf>ioBuf(std::move(buf));
992 eventBase_->dcheckIsInEventBaseThread();
994 if (shutdownFlags_ & (SHUT_WRITE | SHUT_WRITE_PENDING)) {
995 // No new writes may be performed after the write side of the socket has
998 // We could just call callback->writeError() here to fail just this write.
999 // However, fail hard and use invalidState() to fail all outstanding
1000 // callbacks and move the socket into the error state. There's most likely
1001 // a bug in the caller's code, so we abort everything rather than trying to
1002 // proceed as best we can.
1003 return invalidState(callback);
1006 uint32_t countWritten = 0;
1007 uint32_t partialWritten = 0;
1008 ssize_t bytesWritten = 0;
1009 bool mustRegister = false;
1010 if ((state_ == StateEnum::ESTABLISHED || state_ == StateEnum::FAST_OPEN) &&
1012 if (writeReqHead_ == nullptr) {
1013 // If we are established and there are no other writes pending,
1014 // we can attempt to perform the write immediately.
1015 assert(writeReqTail_ == nullptr);
1016 assert((eventFlags_ & EventHandler::WRITE) == 0);
1018 auto writeResult = performWrite(
1019 vec, uint32_t(count), flags, &countWritten, &partialWritten);
1020 bytesWritten = writeResult.writeReturn;
1021 if (bytesWritten < 0) {
1022 auto errnoCopy = errno;
1023 if (writeResult.exception) {
1024 return failWrite(__func__, callback, 0, *writeResult.exception);
1026 AsyncSocketException ex(
1027 AsyncSocketException::INTERNAL_ERROR,
1028 withAddr("writev failed"),
1030 return failWrite(__func__, callback, 0, ex);
1031 } else if (countWritten == count) {
1032 // done, add the whole buffer
1033 if (isZeroCopyRequest(flags)) {
1034 addZeroCopyBuf(std::move(ioBuf));
1036 // We successfully wrote everything.
1037 // Invoke the callback and return.
1039 callback->writeSuccess();
1042 } else { // continue writing the next writeReq
1044 if (isZeroCopyRequest(flags)) {
1045 addZeroCopyBuf(ioBuf.get());
1047 if (bufferCallback_) {
1048 bufferCallback_->onEgressBuffered();
1051 if (!connecting()) {
1052 // Writes might put the socket back into connecting state
1053 // if TFO is enabled, and using TFO fails.
1054 // This means that write timeouts would not be active, however
1055 // connect timeouts would affect this stage.
1056 mustRegister = true;
1059 } else if (!connecting()) {
1060 // Invalid state for writing
1061 return invalidState(callback);
1064 // Create a new WriteRequest to add to the queue
1067 req = BytesWriteRequest::newRequest(
1071 uint32_t(count - countWritten),
1073 uint32_t(bytesWritten),
1076 } catch (const std::exception& ex) {
1077 // we mainly expect to catch std::bad_alloc here
1078 AsyncSocketException tex(AsyncSocketException::INTERNAL_ERROR,
1079 withAddr(string("failed to append new WriteRequest: ") + ex.what()));
1080 return failWrite(__func__, callback, size_t(bytesWritten), tex);
1083 if (writeReqTail_ == nullptr) {
1084 assert(writeReqHead_ == nullptr);
1085 writeReqHead_ = writeReqTail_ = req;
1087 writeReqTail_->append(req);
1088 writeReqTail_ = req;
1091 // Register for write events if are established and not currently
1092 // waiting on write events
1094 assert(state_ == StateEnum::ESTABLISHED);
1095 assert((eventFlags_ & EventHandler::WRITE) == 0);
1096 if (!updateEventRegistration(EventHandler::WRITE, 0)) {
1097 assert(state_ == StateEnum::ERROR);
1100 if (sendTimeout_ > 0) {
1101 // Schedule a timeout to fire if the write takes too long.
1102 if (!writeTimeout_.scheduleTimeout(sendTimeout_)) {
1103 AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
1104 withAddr("failed to schedule send timeout"));
1105 return failWrite(__func__, ex);
1111 void AsyncSocket::writeRequest(WriteRequest* req) {
1112 if (writeReqTail_ == nullptr) {
1113 assert(writeReqHead_ == nullptr);
1114 writeReqHead_ = writeReqTail_ = req;
1117 writeReqTail_->append(req);
1118 writeReqTail_ = req;
// Graceful close. If writes are still pending while CONNECTING or
// ESTABLISHED, mark the socket SHUT_READ|SHUT_WRITE_PENDING and let the
// pending writes drain before the real close happens; the read callback
// (if any) is informed via readEOF() immediately.
1122 void AsyncSocket::close() {
1123 VLOG(5) << "AsyncSocket::close(): this=" << this << ", fd_=" << fd_
1124 << ", state=" << state_ << ", shutdownFlags="
1125 << std::hex << (int) shutdownFlags_;
1127 // close() is only different from closeNow() when there are pending writes
1128 // that need to drain before we can close. In all other cases, just call
1131 // Note that writeReqHead_ can be non-nullptr even in STATE_CLOSED or
1132 // STATE_ERROR if close() is invoked while a previous closeNow() or failure
1133 // is still running. (e.g., If there are multiple pending writes, and we
1134 // call writeError() on the first one, it may call close(). In this case we
1135 // will already be in STATE_CLOSED or STATE_ERROR, but the remaining pending
1136 // writes will still be in the queue.)
1138 // We only need to drain pending writes if we are still in STATE_CONNECTING
1139 // or STATE_ESTABLISHED
1140 if ((writeReqHead_ == nullptr) ||
1141 !(state_ == StateEnum::CONNECTING ||
1142 state_ == StateEnum::ESTABLISHED)) {
1147 // Declare a DestructorGuard to ensure that the AsyncSocket cannot be
1148 // destroyed until close() returns.
1149 DestructorGuard dg(this);
1150 eventBase_->dcheckIsInEventBaseThread();
1152 // Since there are write requests pending, we have to set the
1153 // SHUT_WRITE_PENDING flag, and wait to perform the real close until the
1154 // connect finishes and we finish writing these requests.
1156 // Set SHUT_READ to indicate that reads are shut down, and set the
1157 // SHUT_WRITE_PENDING flag to mark that we want to shutdown once the
1158 // pending writes complete.
1159 shutdownFlags_ |= (SHUT_READ | SHUT_WRITE_PENDING);
1161 // If a read callback is set, invoke readEOF() immediately to inform it that
1162 // the socket has been closed and no more data can be read.
1163 if (readCallback_) {
1164 // Disable reads if they are enabled
1165 if (!updateEventRegistration(0, EventHandler::READ)) {
1166 // We're now in the error state; callbacks have been cleaned up
1167 assert(state_ == StateEnum::ERROR);
1168 assert(readCallback_ == nullptr);
1170 ReadCallback* callback = readCallback_;
1171 readCallback_ = nullptr;
1172 callback->readEOF();
// Immediate close, regardless of pending writes: cancels timers,
// unregisters I/O events, detaches the fd from the IoHandler, fails the
// outstanding connect and all queued writes, and delivers readEOF() to any
// installed read callback. Safe to call re-entrantly from callbacks (the
// CLOSED/ERROR cases below are deliberate no-ops).
1177 void AsyncSocket::closeNow() {
1178 VLOG(5) << "AsyncSocket::closeNow(): this=" << this << ", fd_=" << fd_
1179 << ", state=" << state_ << ", shutdownFlags="
1180 << std::hex << (int) shutdownFlags_;
1181 DestructorGuard dg(this);
1183 eventBase_->dcheckIsInEventBaseThread();
1187 case StateEnum::ESTABLISHED:
1188 case StateEnum::CONNECTING:
1189 case StateEnum::FAST_OPEN: {
1190 shutdownFlags_ |= (SHUT_READ | SHUT_WRITE);
1191 state_ = StateEnum::CLOSED;
1193 // If the write timeout was set, cancel it.
1194 writeTimeout_.cancelTimeout();
1196 // If we are registered for I/O events, unregister.
1197 if (eventFlags_ != EventHandler::NONE) {
1198 eventFlags_ = EventHandler::NONE;
1199 if (!updateEventRegistration()) {
1200 // We will have been moved into the error state.
1201 assert(state_ == StateEnum::ERROR);
1206 if (immediateReadHandler_.isLoopCallbackScheduled()) {
1207 immediateReadHandler_.cancelLoopCallback();
1211 ioHandler_.changeHandlerFD(-1);
1215 invokeConnectErr(socketClosedLocallyEx);
1217 failAllWrites(socketClosedLocallyEx);
1219 if (readCallback_) {
1220 ReadCallback* callback = readCallback_;
1221 readCallback_ = nullptr;
1222 callback->readEOF();
1226 case StateEnum::CLOSED:
1227 // Do nothing. It's possible that we are being called recursively
1228 // from inside a callback that we invoked inside another call to close()
1229 // that is still running.
1231 case StateEnum::ERROR:
1232 // Do nothing. The error handling code has performed (or is performing)
1235 case StateEnum::UNINIT:
1236 assert(eventFlags_ == EventHandler::NONE);
1237 assert(connectCallback_ == nullptr);
1238 assert(readCallback_ == nullptr);
1239 assert(writeReqHead_ == nullptr);
1240 shutdownFlags_ |= (SHUT_READ | SHUT_WRITE);
1241 state_ = StateEnum::CLOSED;
1245 LOG(DFATAL) << "AsyncSocket::closeNow() (this=" << this << ", fd=" << fd_
1246 << ") called in unknown state " << state_;
// Close the socket sending a TCP RST instead of a FIN: enabling SO_LINGER
// with a zero timeout makes the subsequent close abort the connection.
// A failure to set the option is only logged (best-effort), then the
// normal closeNow() path runs.
1249 void AsyncSocket::closeWithReset() {
1250 // Enable SO_LINGER, with the linger timeout set to 0.
1251 // This will trigger a TCP reset when we close the socket.
1253 struct linger optLinger = {1, 0};
1254 if (setSockOpt(SOL_SOCKET, SO_LINGER, &optLinger) != 0) {
1255 VLOG(2) << "AsyncSocket::closeWithReset(): error setting SO_LINGER "
1256 << "on " << fd_ << ": errno=" << errno;
1260 // Then let closeNow() take care of the rest
// Graceful write-half shutdown: with no pending writes this is the same as
// shutdownWriteNow(); otherwise just set SHUT_WRITE_PENDING so the actual
// shutdown happens once the queued writes have drained.
1264 void AsyncSocket::shutdownWrite() {
1265 VLOG(5) << "AsyncSocket::shutdownWrite(): this=" << this << ", fd=" << fd_
1266 << ", state=" << state_ << ", shutdownFlags="
1267 << std::hex << (int) shutdownFlags_;
1269 // If there are no pending writes, shutdownWrite() is identical to
1270 // shutdownWriteNow().
1271 if (writeReqHead_ == nullptr) {
1276 eventBase_->dcheckIsInEventBaseThread();
1278 // There are pending writes. Set SHUT_WRITE_PENDING so that the actual
1279 // shutdown will be performed once all writes complete.
1280 shutdownFlags_ |= SHUT_WRITE_PENDING;
// Immediately shut down the write half of the socket. Behavior depends on
// state: ESTABLISHED does a real shutdown(fd_, SHUT_WR) and fails queued
// writes; CONNECTING/UNINIT only mark SHUT_WRITE_PENDING (the shutdown is
// applied once the connect completes); FAST_OPEN marks SHUT_WRITE outright
// since connect was never initiated. If reads are already shut down this
// escalates to a full close (via the SHUT_READ branch below).
1283 void AsyncSocket::shutdownWriteNow() {
1284 VLOG(5) << "AsyncSocket::shutdownWriteNow(): this=" << this
1285 << ", fd=" << fd_ << ", state=" << state_
1286 << ", shutdownFlags=" << std::hex << (int) shutdownFlags_;
1288 if (shutdownFlags_ & SHUT_WRITE) {
1289 // Writes are already shutdown; nothing else to do.
1293 // If SHUT_READ is already set, just call closeNow() to completely
1294 // close the socket. This can happen if close() was called with writes
1295 // pending, and then shutdownWriteNow() is called before all pending writes
1297 if (shutdownFlags_ & SHUT_READ) {
1302 DestructorGuard dg(this);
1304 eventBase_->dcheckIsInEventBaseThread();
1307 switch (static_cast<StateEnum>(state_)) {
1308 case StateEnum::ESTABLISHED:
1310 shutdownFlags_ |= SHUT_WRITE;
1312 // If the write timeout was set, cancel it.
1313 writeTimeout_.cancelTimeout();
1315 // If we are registered for write events, unregister.
1316 if (!updateEventRegistration(0, EventHandler::WRITE)) {
1317 // We will have been moved into the error state.
1318 assert(state_ == StateEnum::ERROR);
1322 // Shutdown writes on the file descriptor
1323 shutdown(fd_, SHUT_WR);
1325 // Immediately fail all write requests
1326 failAllWrites(socketShutdownForWritesEx);
1329 case StateEnum::CONNECTING:
1331 // Set the SHUT_WRITE_PENDING flag.
1332 // When the connection completes, it will check this flag,
1333 // shutdown the write half of the socket, and then set SHUT_WRITE.
1334 shutdownFlags_ |= SHUT_WRITE_PENDING;
1336 // Immediately fail all write requests
1337 failAllWrites(socketShutdownForWritesEx);
1340 case StateEnum::UNINIT:
1341 // Callers normally shouldn't call shutdownWriteNow() before the socket
1342 // even starts connecting. Nonetheless, go ahead and set
1343 // SHUT_WRITE_PENDING. Once the socket eventually connects it will
1344 // immediately shut down the write side of the socket.
1345 shutdownFlags_ |= SHUT_WRITE_PENDING;
1347 case StateEnum::FAST_OPEN:
1348 // In fast open state we haven't call connected yet, and if we shutdown
1349 // the writes, we will never try to call connect, so shut everything down
1350 shutdownFlags_ |= SHUT_WRITE;
1351 // Immediately fail all write requests
1352 failAllWrites(socketShutdownForWritesEx);
1354 case StateEnum::CLOSED:
1355 case StateEnum::ERROR:
1356 // We should never get here. SHUT_WRITE should always be set
1357 // in STATE_CLOSED and STATE_ERROR.
1358 VLOG(4) << "AsyncSocket::shutdownWriteNow() (this=" << this
1359 << ", fd=" << fd_ << ") in unexpected state " << state_
1360 << " with SHUT_WRITE not set ("
1361 << std::hex << (int) shutdownFlags_ << ")";
1366 LOG(DFATAL) << "AsyncSocket::shutdownWriteNow() (this=" << this << ", fd="
1367 << fd_ << ") called in unknown state " << state_;
// Non-blocking readability probe: poll() the fd with a zero timeout for
// POLLIN. (Result interpretation of rc is in elided lines — presumably
// rc == 1 means readable.)
1370 bool AsyncSocket::readable() const {
1374 struct pollfd fds[1];
1376 fds[0].events = POLLIN;
1378 int rc = poll(fds, 1, 0);
// Non-blocking writability probe: poll() the fd with a zero timeout for
// POLLOUT.
1382 bool AsyncSocket::writable() const {
1386 struct pollfd fds[1];
1388 fds[0].events = POLLOUT;
1390 int rc = poll(fds, 1, 0);
// True if the underlying IoHandler has a pending event; simple delegation.
1394 bool AsyncSocket::isPending() const {
1395 return ioHandler_.isPending();
// Detect peer hangup via poll(). Uses POLLRDHUP, which is Linux-specific;
// the non-Linux fallback (if any) is outside the visible code.
1398 bool AsyncSocket::hangup() const {
1400 // sanity check, no one should ask for hangup if we are not connected.
1404 #ifdef POLLRDHUP // Linux-only
1405 struct pollfd fds[1];
1407 fds[0].events = POLLRDHUP|POLLHUP;
1410 return (fds[0].revents & (POLLRDHUP|POLLHUP)) != 0;
// The socket is "good" only while connecting/fast-open/established, with
// no shutdown flags set and an EventBase attached.
1416 bool AsyncSocket::good() const {
1418 (state_ == StateEnum::CONNECTING || state_ == StateEnum::FAST_OPEN ||
1419 state_ == StateEnum::ESTABLISHED) &&
1420 (shutdownFlags_ == 0) && (eventBase_ != nullptr));
// True once the socket has entered the terminal ERROR state.
1423 bool AsyncSocket::error() const {
1424 return (state_ == StateEnum::ERROR);
// Attach this (currently detached) socket to a new EventBase, re-attaching
// the I/O handler and write timeout and re-establishing event
// registrations. Must be called from the new EventBase's thread, and only
// when eventBase_ is nullptr (i.e. after detachEventBase()).
1427 void AsyncSocket::attachEventBase(EventBase* eventBase) {
1428 VLOG(5) << "AsyncSocket::attachEventBase(this=" << this << ", fd=" << fd_
1429 << ", old evb=" << eventBase_ << ", new evb=" << eventBase
1430 << ", state=" << state_ << ", events="
1431 << std::hex << eventFlags_ << ")";
1432 assert(eventBase_ == nullptr);
1433 eventBase->dcheckIsInEventBaseThread();
1435 eventBase_ = eventBase;
1436 ioHandler_.attachEventBase(eventBase);
1438 updateEventRegistration();
1440 writeTimeout_.attachEventBase(eventBase);
1442 evbChangeCb_->evbAttached(this);
// Detach from the current EventBase: unregister the I/O handler, detach
// the handler and write timeout, and notify the evb-change callback.
// Must run in the current EventBase's thread.
1446 void AsyncSocket::detachEventBase() {
1447 VLOG(5) << "AsyncSocket::detachEventBase(this=" << this << ", fd=" << fd_
1448 << ", old evb=" << eventBase_ << ", state=" << state_
1449 << ", events=" << std::hex << eventFlags_ << ")";
1450 assert(eventBase_ != nullptr);
1451 eventBase_->dcheckIsInEventBaseThread();
1453 eventBase_ = nullptr;
1455 ioHandler_.unregisterHandler();
1457 ioHandler_.detachEventBase();
1458 writeTimeout_.detachEventBase();
1460 evbChangeCb_->evbDetached(this);
// Detaching is only safe while no write timeout is scheduled (additional
// conditions may exist in the elided lines).
1464 bool AsyncSocket::isDetachable() const {
1465 DCHECK(eventBase_ != nullptr);
1466 eventBase_->dcheckIsInEventBaseThread();
1468 return !writeTimeout_.isScheduled();
// Best-effort caching of local (and, per the function name, peer)
// addresses. ENOTCONN is expected and silently ignored; any other
// system_error is only logged.
1471 void AsyncSocket::cacheAddresses() {
1474 cacheLocalAddress();
1476 } catch (const std::system_error& e) {
1477 if (e.code() != std::error_code(ENOTCONN, std::system_category())) {
1478 VLOG(1) << "Error caching addresses: " << e.code().value() << ", "
1479 << e.code().message();
// Lazily populate localAddr_ from the fd; no-op if already cached.
1485 void AsyncSocket::cacheLocalAddress() const {
1486 if (!localAddr_.isInitialized()) {
1487 localAddr_.setFromLocalAddress(fd_);
// Lazily populate addr_ (the peer address) from the fd; no-op if cached.
1491 void AsyncSocket::cachePeerAddress() const {
1492 if (!addr_.isInitialized()) {
1493 addr_.setFromPeerAddress(fd_);
// A zero-copy write is in progress while any buffers are still tracked in
// idZeroCopyBufPtrMap_ (awaiting kernel completion notifications).
1497 bool AsyncSocket::isZeroCopyWriteInProgress() const noexcept {
1498 eventBase_->dcheckIsInEventBaseThread();
1499 return (!idZeroCopyBufPtrMap_.empty());
// Return the (cached-on-demand) local address via the out-parameter.
1502 void AsyncSocket::getLocalAddress(folly::SocketAddress* address) const {
1503 cacheLocalAddress();
1504 *address = localAddr_;
// Peer-address accessor; body elided here — presumably mirrors
// getLocalAddress() using cachePeerAddress()/addr_ (TODO confirm).
1507 void AsyncSocket::getPeerAddress(folly::SocketAddress* address) const {
// Delegates to the platform helper to check whether TCP Fast Open was
// actually used on this connection. (Name keeps its historical spelling;
// renaming would break the public interface.)
1512 bool AsyncSocket::getTFOSucceded() const {
1513 return detail::tfo_succeeded(fd_);
// Enable/disable Nagle's algorithm via TCP_NODELAY. Returns non-zero
// (per the visible error paths) when the socket is not open or
// setsockopt() fails; failures are logged at VLOG(2).
1516 int AsyncSocket::setNoDelay(bool noDelay) {
1518 VLOG(4) << "AsyncSocket::setNoDelay() called on non-open socket "
1519 << this << "(state=" << state_ << ")";
1524 int value = noDelay ? 1 : 0;
1525 if (setsockopt(fd_, IPPROTO_TCP, TCP_NODELAY, &value, sizeof(value)) != 0) {
1526 int errnoCopy = errno;
1527 VLOG(2) << "failed to update TCP_NODELAY option on AsyncSocket "
1528 << this << " (fd=" << fd_ << ", state=" << state_ << "): "
1529 << strerror(errnoCopy);
// Select the TCP congestion-control algorithm (e.g. "cubic", "bbr") via
// TCP_CONGESTION. The option value is defined locally (13) for platforms
// whose headers predate it. The +1 on the length includes the NUL
// terminator of the algorithm name.
1536 int AsyncSocket::setCongestionFlavor(const std::string &cname) {
1538 #ifndef TCP_CONGESTION
1539 #define TCP_CONGESTION 13
1543 VLOG(4) << "AsyncSocket::setCongestionFlavor() called on non-open "
1544 << "socket " << this << "(state=" << state_ << ")";
1554 socklen_t(cname.length() + 1)) != 0) {
1555 int errnoCopy = errno;
1556 VLOG(2) << "failed to update TCP_CONGESTION option on AsyncSocket "
1557 << this << "(fd=" << fd_ << ", state=" << state_ << "): "
1558 << strerror(errnoCopy);
// Enable/disable TCP_QUICKACK (Linux-only; note the kernel may clear this
// flag on its own, so it is not sticky). No-op path for platforms without
// TCP_QUICKACK is in the elided lines.
1565 int AsyncSocket::setQuickAck(bool quickack) {
1568 VLOG(4) << "AsyncSocket::setQuickAck() called on non-open socket "
1569 << this << "(state=" << state_ << ")";
1574 #ifdef TCP_QUICKACK // Linux-only
1575 int value = quickack ? 1 : 0;
1576 if (setsockopt(fd_, IPPROTO_TCP, TCP_QUICKACK, &value, sizeof(value)) != 0) {
1577 int errnoCopy = errno;
1578 VLOG(2) << "failed to update TCP_QUICKACK option on AsyncSocket"
1579 << this << "(fd=" << fd_ << ", state=" << state_ << "): "
1580 << strerror(errnoCopy);
// Set the kernel send buffer size (SO_SNDBUF) for the socket.
1590 int AsyncSocket::setSendBufSize(size_t bufsize) {
1592 VLOG(4) << "AsyncSocket::setSendBufSize() called on non-open socket "
1593 << this << "(state=" << state_ << ")";
1597 if (setsockopt(fd_, SOL_SOCKET, SO_SNDBUF, &bufsize, sizeof(bufsize)) !=0) {
1598 int errnoCopy = errno;
1599 VLOG(2) << "failed to update SO_SNDBUF option on AsyncSocket"
1600 << this << "(fd=" << fd_ << ", state=" << state_ << "): "
1601 << strerror(errnoCopy);
// Set the kernel receive buffer size (SO_RCVBUF) for the socket.
1608 int AsyncSocket::setRecvBufSize(size_t bufsize) {
1610 VLOG(4) << "AsyncSocket::setRecvBufSize() called on non-open socket "
1611 << this << "(state=" << state_ << ")";
1615 if (setsockopt(fd_, SOL_SOCKET, SO_RCVBUF, &bufsize, sizeof(bufsize)) !=0) {
1616 int errnoCopy = errno;
1617 VLOG(2) << "failed to update SO_RCVBUF option on AsyncSocket"
1618 << this << "(fd=" << fd_ << ", state=" << state_ << "): "
1619 << strerror(errnoCopy);
// Apply a TCP profile via the (non-standard, presumably Facebook-kernel)
// SO_SET_NAMESPACE socket option — TODO confirm against the kernel patch
// that defines it.
1626 int AsyncSocket::setTCPProfile(int profd) {
1628 VLOG(4) << "AsyncSocket::setTCPProfile() called on non-open socket "
1629 << this << "(state=" << state_ << ")";
1633 if (setsockopt(fd_, SOL_SOCKET, SO_SET_NAMESPACE, &profd, sizeof(int)) !=0) {
1634 int errnoCopy = errno;
1635 VLOG(2) << "failed to set socket namespace option on AsyncSocket"
1636 << this << "(fd=" << fd_ << ", state=" << state_ << "): "
1637 << strerror(errnoCopy);
// Central I/O event dispatcher: drains the socket error-message queue
// first, then routes READ/WRITE readiness to handleRead()/handleWrite().
// After every callback-invoking step it re-checks eventBase_ against the
// value captured on entry, since callbacks may detach the socket.
// NOTE(review): the VLOG/log strings say "ioRead()" — a historical
// misnomer for ioReady(); left unchanged since log text is runtime output.
1644 void AsyncSocket::ioReady(uint16_t events) noexcept {
1645 VLOG(7) << "AsyncSocket::ioRead() this=" << this << ", fd=" << fd_
1646 << ", events=" << std::hex << events << ", state=" << state_;
1647 DestructorGuard dg(this);
1648 assert(events & EventHandler::READ_WRITE);
1649 eventBase_->dcheckIsInEventBaseThread();
1651 uint16_t relevantEvents = uint16_t(events & EventHandler::READ_WRITE);
1652 EventBase* originalEventBase = eventBase_;
1653 // If we got there it means that either EventHandler::READ or
1654 // EventHandler::WRITE is set. Any of these flags can
1655 // indicate that there are messages available in the socket
1656 // error message queue.
1657 handleErrMessages();
1659 // Return now if handleErrMessages() detached us from our EventBase
1660 if (eventBase_ != originalEventBase) {
1664 if (relevantEvents == EventHandler::READ) {
1666 } else if (relevantEvents == EventHandler::WRITE) {
1668 } else if (relevantEvents == EventHandler::READ_WRITE) {
1669 // If both read and write events are ready, process writes first.
1672 // Return now if handleWrite() detached us from our EventBase
1673 if (eventBase_ != originalEventBase) {
1677 // Only call handleRead() if a read callback is still installed.
1678 // (It's possible that the read callback was uninstalled during
1680 if (readCallback_) {
1684 VLOG(4) << "AsyncSocket::ioRead() called with unexpected events "
1685 << std::hex << events << "(this=" << this << ")";
// Perform one read into (*buf, *buflen). If pre-received data exists
// (e.g. bytes peeked/buffered before a protocol handoff) it is consumed
// first without touching the fd; the leftover is re-queued via an
// IOBufQueue. Otherwise a single non-blocking recv() is issued, mapping
// EAGAIN/EWOULDBLOCK to READ_BLOCKING and other failures to READ_ERROR.
// appBytesReceived_ is bumped on every successful delivery.
1690 AsyncSocket::ReadResult
1691 AsyncSocket::performRead(void** buf, size_t* buflen, size_t* /* offset */) {
1692 VLOG(5) << "AsyncSocket::performRead() this=" << this << ", buf=" << *buf
1693 << ", buflen=" << *buflen;
1695 if (preReceivedData_ && !preReceivedData_->empty()) {
1696 VLOG(5) << "AsyncSocket::performRead() this=" << this
1697 << ", reading pre-received data";
1699 io::Cursor cursor(preReceivedData_.get());
1700 auto len = cursor.pullAtMost(*buf, *buflen);
1703 queue.append(std::move(preReceivedData_));
1704 queue.trimStart(len);
1705 preReceivedData_ = queue.move();
1707 appBytesReceived_ += len;
1708 return ReadResult(len);
1711 ssize_t bytes = recv(fd_, *buf, *buflen, MSG_DONTWAIT);
1713 if (errno == EAGAIN || errno == EWOULDBLOCK) {
1714 // No more data to read right now.
1715 return ReadResult(READ_BLOCKING);
1717 return ReadResult(READ_ERROR);
1720 appBytesReceived_ += bytes;
1721 return ReadResult(bytes);
// Ask the installed ReadCallback for the destination buffer of the next
// read. Subclasses (e.g. SSL) may override with different buffering.
1725 void AsyncSocket::prepareReadBuffer(void** buf, size_t* buflen) {
1726 // no matter what, buffer should be prepared for non-ssl socket
1727 CHECK(readCallback_);
1728 readCallback_->getReadBuffer(buf, buflen);
// Drain the per-socket error queue (Linux MSG_ERRQUEUE) via recvmsg(),
// dispatching each control message either to the zero-copy completion
// handling or to the installed errMessageCallback_. Early-outs when
// neither consumer exists. EAGAIN terminates the drain loop; other errors
// fail the err-message read path. Entire body compiles away on platforms
// without FOLLY_HAVE_MSG_ERRQUEUE.
1731 void AsyncSocket::handleErrMessages() noexcept {
1732 // This method has non-empty implementation only for platforms
1733 // supporting per-socket error queues.
1734 VLOG(5) << "AsyncSocket::handleErrMessages() this=" << this << ", fd=" << fd_
1735 << ", state=" << state_;
1736 if (errMessageCallback_ == nullptr && idZeroCopyBufPtrMap_.empty()) {
1737 VLOG(7) << "AsyncSocket::handleErrMessages(): "
1738 << "no callback installed - exiting.";
1742 #ifdef FOLLY_HAVE_MSG_ERRQUEUE
1748 entry.iov_base = &data;
1749 entry.iov_len = sizeof(data);
1750 msg.msg_iov = &entry;
1752 msg.msg_name = nullptr;
1753 msg.msg_namelen = 0;
1754 msg.msg_control = ctrl;
1755 msg.msg_controllen = sizeof(ctrl);
1760 ret = recvmsg(fd_, &msg, MSG_ERRQUEUE);
1761 VLOG(5) << "AsyncSocket::handleErrMessages(): recvmsg returned " << ret;
1764 if (errno != EAGAIN) {
1765 auto errnoCopy = errno;
1766 LOG(ERROR) << "::recvmsg exited with code " << ret
1767 << ", errno: " << errnoCopy;
1768 AsyncSocketException ex(
1769 AsyncSocketException::INTERNAL_ERROR,
1770 withAddr("recvmsg() failed"),
1772 failErrMessageRead(__func__, ex);
1777 for (struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
1778 cmsg != nullptr && cmsg->cmsg_len != 0;
1779 cmsg = CMSG_NXTHDR(&msg, cmsg)) {
1780 if (isZeroCopyMsg(*cmsg)) {
1781 processZeroCopyMsg(*cmsg);
1783 if (errMessageCallback_) {
1784 errMessageCallback_->errMessage(*cmsg);
1789 #endif // FOLLY_HAVE_MSG_ERRQUEUE
// Read-ready handler: loops reading into callback-supplied buffers and
// delivering data via readDataAvailable()/readBufferAvailable() until the
// read would block, the callback is uninstalled, maxReadsPerEvent_ is
// exceeded, EOF/error occurs, or the socket is moved to another EventBase
// (see the detailed comment below on why eventBase_ must be re-checked).
1792 void AsyncSocket::handleRead() noexcept {
1793 VLOG(5) << "AsyncSocket::handleRead() this=" << this << ", fd=" << fd_
1794 << ", state=" << state_;
1795 assert(state_ == StateEnum::ESTABLISHED);
1796 assert((shutdownFlags_ & SHUT_READ) == 0);
1797 assert(readCallback_ != nullptr);
1798 assert(eventFlags_ & EventHandler::READ);
1801 // - a read attempt would block
1802 // - readCallback_ is uninstalled
1803 // - the number of loop iterations exceeds the optional maximum
1804 // - this AsyncSocket is moved to another EventBase
1806 // When we invoke readDataAvailable() it may uninstall the readCallback_,
1807 // which is why need to check for it here.
1809 // The last bullet point is slightly subtle. readDataAvailable() may also
1810 // detach this socket from this EventBase. However, before
1811 // readDataAvailable() returns another thread may pick it up, attach it to
1812 // a different EventBase, and install another readCallback_. We need to
1813 // exit immediately after readDataAvailable() returns if the eventBase_ has
1814 // changed. (The caller must perform some sort of locking to transfer the
1815 // AsyncSocket between threads properly. This will be sufficient to ensure
1816 // that this thread sees the updated eventBase_ variable after
1817 // readDataAvailable() returns.)
1818 uint16_t numReads = 0;
1819 EventBase* originalEventBase = eventBase_;
1820 while (readCallback_ && eventBase_ == originalEventBase) {
1821 // Get the buffer to read into.
1822 void* buf = nullptr;
1823 size_t buflen = 0, offset = 0;
1825 prepareReadBuffer(&buf, &buflen);
1826 VLOG(5) << "prepareReadBuffer() buf=" << buf << ", buflen=" << buflen;
1827 } catch (const AsyncSocketException& ex) {
1828 return failRead(__func__, ex);
1829 } catch (const std::exception& ex) {
1830 AsyncSocketException tex(AsyncSocketException::BAD_ARGS,
1831 string("ReadCallback::getReadBuffer() "
1832 "threw exception: ") +
1834 return failRead(__func__, tex);
1836 AsyncSocketException ex(AsyncSocketException::BAD_ARGS,
1837 "ReadCallback::getReadBuffer() threw "
1838 "non-exception type");
1839 return failRead(__func__, ex);
1841 if (!isBufferMovable_ && (buf == nullptr || buflen == 0)) {
1842 AsyncSocketException ex(AsyncSocketException::BAD_ARGS,
1843 "ReadCallback::getReadBuffer() returned "
1845 return failRead(__func__, ex);
1849 auto readResult = performRead(&buf, &buflen, &offset);
1850 auto bytesRead = readResult.readReturn;
1851 VLOG(4) << "this=" << this << ", AsyncSocket::handleRead() got "
1852 << bytesRead << " bytes";
1853 if (bytesRead > 0) {
1854 if (!isBufferMovable_) {
1855 readCallback_->readDataAvailable(size_t(bytesRead));
1857 CHECK(kOpenSslModeMoveBufferOwnership);
1858 VLOG(5) << "this=" << this << ", AsyncSocket::handleRead() got "
1859 << "buf=" << buf << ", " << bytesRead << "/" << buflen
1860 << ", offset=" << offset;
// Movable-buffer path: wrap the raw buffer in an IOBuf and hand
// ownership to the callback, trimming to the bytes actually read.
1861 auto readBuf = folly::IOBuf::takeOwnership(buf, buflen);
1862 readBuf->trimStart(offset);
1863 readBuf->trimEnd(buflen - offset - bytesRead);
1864 readCallback_->readBufferAvailable(std::move(readBuf));
1867 // Fall through and continue around the loop if the read
1868 // completely filled the available buffer.
1869 // Note that readCallback_ may have been uninstalled or changed inside
1870 // readDataAvailable().
1871 if (size_t(bytesRead) < buflen) {
1874 } else if (bytesRead == READ_BLOCKING) {
1875 // No more data to read right now.
1877 } else if (bytesRead == READ_ERROR) {
1878 readErr_ = READ_ERROR;
1879 if (readResult.exception) {
1880 return failRead(__func__, *readResult.exception);
1882 auto errnoCopy = errno;
1883 AsyncSocketException ex(
1884 AsyncSocketException::INTERNAL_ERROR,
1885 withAddr("recv() failed"),
1887 return failRead(__func__, ex);
1889 assert(bytesRead == READ_EOF);
1890 readErr_ = READ_EOF;
// EOF: shut down the read side, unregister for reads, and deliver
// readEOF() to the (cleared) callback.
1892 shutdownFlags_ |= SHUT_READ;
1893 if (!updateEventRegistration(0, EventHandler::READ)) {
1894 // we've already been moved into STATE_ERROR
1895 assert(state_ == StateEnum::ERROR);
1896 assert(readCallback_ == nullptr);
1900 ReadCallback* callback = readCallback_;
1901 readCallback_ = nullptr;
1902 callback->readEOF();
1905 if (maxReadsPerEvent_ && (++numReads >= maxReadsPerEvent_)) {
1906 if (readCallback_ != nullptr) {
1907 // We might still have data in the socket.
1908 // (e.g. see comment in AsyncSSLSocket::checkForImmediateRead)
1909 scheduleImmediateRead();
1917 * This function attempts to write as much data as possible, until no more data
1920 * - If it sends all available data, it unregisters for write events, and stops
1921 * the writeTimeout_.
1923 * - If not all of the data can be sent immediately, it reschedules
1924 * writeTimeout_ (if a non-zero timeout is set), and ensures the handler is
1925 * registered for write events.
// Write-ready handler: completes the connect if still CONNECTING, then
// drains writeReqHead_ one request at a time. Completed requests fire
// writeSuccess() (after event/timer state is updated, since the callback
// may detach us); a partial write consumes progress, re-registers for
// WRITE, reschedules the send timeout, and stops — a retry would almost
// certainly hit EAGAIN. Honors SHUT_WRITE_PENDING by shutting down or
// fully closing once the final request finishes.
1927 void AsyncSocket::handleWrite() noexcept {
1928 VLOG(5) << "AsyncSocket::handleWrite() this=" << this << ", fd=" << fd_
1929 << ", state=" << state_;
1930 DestructorGuard dg(this);
1932 if (state_ == StateEnum::CONNECTING) {
1938 assert(state_ == StateEnum::ESTABLISHED);
1939 assert((shutdownFlags_ & SHUT_WRITE) == 0);
1940 assert(writeReqHead_ != nullptr);
1942 // Loop until we run out of write requests,
1943 // or until this socket is moved to another EventBase.
1944 // (See the comment in handleRead() explaining how this can happen.)
1945 EventBase* originalEventBase = eventBase_;
1946 while (writeReqHead_ != nullptr && eventBase_ == originalEventBase) {
1947 auto writeResult = writeReqHead_->performWrite();
1948 if (writeResult.writeReturn < 0) {
1949 if (writeResult.exception) {
1950 return failWrite(__func__, *writeResult.exception);
1952 auto errnoCopy = errno;
1953 AsyncSocketException ex(
1954 AsyncSocketException::INTERNAL_ERROR,
1955 withAddr("writev() failed"),
1957 return failWrite(__func__, ex);
1958 } else if (writeReqHead_->isComplete()) {
1959 // We finished this request
1960 WriteRequest* req = writeReqHead_;
1961 writeReqHead_ = req->getNext();
1963 if (writeReqHead_ == nullptr) {
1964 writeReqTail_ = nullptr;
1965 // This is the last write request.
1966 // Unregister for write events and cancel the send timer
1967 // before we invoke the callback. We have to update the state properly
1968 // before calling the callback, since it may want to detach us from
1970 if (eventFlags_ & EventHandler::WRITE) {
1971 if (!updateEventRegistration(0, EventHandler::WRITE)) {
1972 assert(state_ == StateEnum::ERROR);
1975 // Stop the send timeout
1976 writeTimeout_.cancelTimeout();
1978 assert(!writeTimeout_.isScheduled());
1980 // If SHUT_WRITE_PENDING is set, we should shutdown the socket after
1981 // we finish sending the last write request.
1983 // We have to do this before invoking writeSuccess(), since
1984 // writeSuccess() may detach us from our EventBase.
1985 if (shutdownFlags_ & SHUT_WRITE_PENDING) {
1986 assert(connectCallback_ == nullptr);
1987 shutdownFlags_ |= SHUT_WRITE;
1989 if (shutdownFlags_ & SHUT_READ) {
1990 // Reads have already been shutdown. Fully close the socket and
1991 // move to STATE_CLOSED.
1993 // Note: This code currently moves us to STATE_CLOSED even if
1994 // close() hasn't ever been called. This can occur if we have
1995 // received EOF from the peer and shutdownWrite() has been called
1996 // locally. Should we bother staying in STATE_ESTABLISHED in this
1997 // case, until close() is actually called? I can't think of a
1998 // reason why we would need to do so. No other operations besides
1999 // calling close() or destroying the socket can be performed at
2001 assert(readCallback_ == nullptr);
2002 state_ = StateEnum::CLOSED;
2004 ioHandler_.changeHandlerFD(-1);
2008 // Reads are still enabled, so we are only doing a half-shutdown
2009 shutdown(fd_, SHUT_WR);
2014 // Invoke the callback
2015 WriteCallback* callback = req->getCallback();
2018 callback->writeSuccess();
2020 // We'll continue around the loop, trying to write another request
2023 if (bufferCallback_) {
2024 bufferCallback_->onEgressBuffered();
2026 writeReqHead_->consume();
2027 // Stop after a partial write; it's highly likely that a subsequent write
2028 // attempt will just return EAGAIN.
2030 // Ensure that we are registered for write events.
2031 if ((eventFlags_ & EventHandler::WRITE) == 0) {
2032 if (!updateEventRegistration(EventHandler::WRITE, 0)) {
2033 assert(state_ == StateEnum::ERROR);
2038 // Reschedule the send timeout, since we have made some write progress.
2039 if (sendTimeout_ > 0) {
2040 if (!writeTimeout_.scheduleTimeout(sendTimeout_)) {
2041 AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
2042 withAddr("failed to reschedule write timeout"));
2043 return failWrite(__func__, ex);
2049 if (!writeReqHead_ && bufferCallback_) {
2050 bufferCallback_->onEgressBufferCleared();
// Hook for optimistic reads right after (re)registration. Base class only
// acts when pre-received data is already buffered; otherwise it waits for
// the event loop (rationale in the comments below). Subclasses override.
2054 void AsyncSocket::checkForImmediateRead() noexcept {
2055 // We currently don't attempt to perform optimistic reads in AsyncSocket.
2056 // (However, note that some subclasses do override this method.)
2058 // Simply calling handleRead() here would be bad, as this would call
2059 // readCallback_->getReadBuffer(), forcing the callback to allocate a read
2060 // buffer even though no data may be available. This would waste lots of
2061 // memory, since the buffer will sit around unused until the socket actually
2062 // becomes readable.
2064 // Checking if the socket is readable now also seems like it would probably
2065 // be a pessimism. In most cases it probably wouldn't be readable, and we
2066 // would just waste an extra system call. Even if it is readable, waiting to
2067 // find out from libevent on the next event loop doesn't seem that bad.
2069 // The exception to this is if we have pre-received data. In that case there
2070 // is definitely data available immediately.
2071 if (preReceivedData_ && !preReceivedData_->empty()) {
// Run right after the connection is established (or TFO completes):
// brings event registrations in line with the currently-installed
// callbacks, probes for immediately-available data, and flushes any
// writes queued while connecting.
2076 void AsyncSocket::handleInitialReadWrite() noexcept {
2077 // Our callers should already be holding a DestructorGuard, but grab
2078 // one here just to make sure, in case one of our calling code paths ever
2080 DestructorGuard dg(this);
2081 // If we have a readCallback_, make sure we enable read events. We
2082 // may already be registered for reads if connectSuccess() set
2083 // the read callback.
2084 if (readCallback_ && !(eventFlags_ & EventHandler::READ)) {
2085 assert(state_ == StateEnum::ESTABLISHED);
2086 assert((shutdownFlags_ & SHUT_READ) == 0);
2087 if (!updateEventRegistration(EventHandler::READ, 0)) {
2088 assert(state_ == StateEnum::ERROR);
2091 checkForImmediateRead();
2092 } else if (readCallback_ == nullptr) {
2093 // Unregister for read events.
2094 updateEventRegistration(0, EventHandler::READ);
2097 // If we have write requests pending, try to send them immediately.
2098 // Since we just finished accepting, there is a very good chance that we can
2099 // write without blocking.
2101 // However, we only process them if EventHandler::WRITE is not already set,
2102 // which means that we're already blocked on a write attempt. (This can
2103 // happen if connectSuccess() called write() before returning.)
2104 if (writeReqHead_ && !(eventFlags_ & EventHandler::WRITE)) {
2105 // Call handleWrite() to perform write processing.
2107 } else if (writeReqHead_ == nullptr) {
2108 // Unregister for write event.
2109 updateEventRegistration(0, EventHandler::WRITE);
// Completion handler for a non-blocking connect: checks SO_ERROR to learn
// the outcome, moves to ESTABLISHED on success (honoring a deferred
// SHUT_WRITE_PENDING), invokes the connect callback, and — unless the
// callback detached us — kicks off initial read/write processing.
2113 void AsyncSocket::handleConnect() noexcept {
2114 VLOG(5) << "AsyncSocket::handleConnect() this=" << this << ", fd=" << fd_
2115 << ", state=" << state_;
2116 assert(state_ == StateEnum::CONNECTING);
2117 // SHUT_WRITE can never be set while we are still connecting;
2118 // SHUT_WRITE_PENDING may be set, be we only set SHUT_WRITE once the connect
2120 assert((shutdownFlags_ & SHUT_WRITE) == 0);
2122 // In case we had a connect timeout, cancel the timeout
2123 writeTimeout_.cancelTimeout();
2124 // We don't use a persistent registration when waiting on a connect event,
2125 // so we have been automatically unregistered now. Update eventFlags_ to
2127 assert(eventFlags_ == EventHandler::WRITE);
2128 eventFlags_ = EventHandler::NONE;
2130 // Call getsockopt() to check if the connect succeeded
2132 socklen_t len = sizeof(error);
2133 int rv = getsockopt(fd_, SOL_SOCKET, SO_ERROR, &error, &len);
2135 auto errnoCopy = errno;
2136 AsyncSocketException ex(
2137 AsyncSocketException::INTERNAL_ERROR,
2138 withAddr("error calling getsockopt() after connect"),
2140 VLOG(4) << "AsyncSocket::handleConnect(this=" << this << ", fd="
2141 << fd_ << " host=" << addr_.describe()
2142 << ") exception:" << ex.what();
2143 return failConnect(__func__, ex);
2147 AsyncSocketException ex(AsyncSocketException::NOT_OPEN,
2148 "connect failed", error);
2149 VLOG(1) << "AsyncSocket::handleConnect(this=" << this << ", fd="
2150 << fd_ << " host=" << addr_.describe()
2151 << ") exception: " << ex.what();
2152 return failConnect(__func__, ex);
2155 // Move into STATE_ESTABLISHED
2156 state_ = StateEnum::ESTABLISHED;
2158 // If SHUT_WRITE_PENDING is set and we don't have any write requests to
2159 // perform, immediately shutdown the write half of the socket.
2160 if ((shutdownFlags_ & SHUT_WRITE_PENDING) && writeReqHead_ == nullptr) {
2161 // SHUT_READ shouldn't be set. If close() is called on the socket while we
2162 // are still connecting we just abort the connect rather than waiting for
2164 assert((shutdownFlags_ & SHUT_READ) == 0);
2165 shutdown(fd_, SHUT_WR);
2166 shutdownFlags_ |= SHUT_WRITE;
2169 VLOG(7) << "AsyncSocket " << this << ": fd " << fd_
2170 << "successfully connected; state=" << state_;
2172 // Remember the EventBase we are attached to, before we start invoking any
2173 // callbacks (since the callbacks may call detachEventBase()).
2174 EventBase* originalEventBase = eventBase_;
2176 invokeConnectSuccess();
2177 // Note that the connect callback may have changed our state.
2178 // (set or unset the read callback, called write(), closed the socket, etc.)
2179 // The following code needs to handle these situations correctly.
2181 // If the socket has been closed, readCallback_ and writeReqHead_ will
2182 // always be nullptr, so that will prevent us from trying to read or write.
2184 // The main thing to check for is if eventBase_ is still originalEventBase.
2185 // If not, we have been detached from this event base, so we shouldn't
2186 // perform any more operations.
2187 if (eventBase_ != originalEventBase) {
2191 handleInitialReadWrite();
// writeTimeout_ expiry handler. While CONNECTING this represents a
// connect timeout (reported through failConnect when a connect callback
// exists, otherwise as a write failure — the TFO case); in any other
// state it is an ordinary send timeout reported through failWrite.
2194 void AsyncSocket::timeoutExpired() noexcept {
2195 VLOG(7) << "AsyncSocket " << this << ", fd " << fd_ << ": timeout expired: "
2196 << "state=" << state_ << ", events=" << std::hex << eventFlags_;
2197 DestructorGuard dg(this);
2198 eventBase_->dcheckIsInEventBaseThread();
2200 if (state_ == StateEnum::CONNECTING) {
2201 // connect() timed out
2202 // Unregister for I/O events.
2203 if (connectCallback_) {
2204 AsyncSocketException ex(
2205 AsyncSocketException::TIMED_OUT,
2207 "connect timed out after {}ms", connectTimeout_.count()));
2208 failConnect(__func__, ex);
2210 // we faced a connect error without a connect callback, which could
2211 // happen due to TFO.
2212 AsyncSocketException ex(
2213 AsyncSocketException::TIMED_OUT, "write timed out during connection");
2214 failWrite(__func__, ex);
2217 // a normal write operation timed out
2218 AsyncSocketException ex(
2219 AsyncSocketException::TIMED_OUT,
2220 folly::sformat("write timed out after {}ms", sendTimeout_));
2221 failWrite(__func__, ex);
2225 ssize_t AsyncSocket::tfoSendMsg(int fd, struct msghdr* msg, int msg_flags) {
2226 return detail::tfo_sendmsg(fd, msg, msg_flags);
2229 AsyncSocket::WriteResult
2230 AsyncSocket::sendSocketMessage(int fd, struct msghdr* msg, int msg_flags) {
2231 ssize_t totalWritten = 0;
2232 if (state_ == StateEnum::FAST_OPEN) {
2233 sockaddr_storage addr;
2234 auto len = addr_.getAddress(&addr);
2235 msg->msg_name = &addr;
2236 msg->msg_namelen = len;
2237 totalWritten = tfoSendMsg(fd_, msg, msg_flags);
2238 if (totalWritten >= 0) {
2239 tfoFinished_ = true;
2240 state_ = StateEnum::ESTABLISHED;
2241 // We schedule this asynchrously so that we don't end up
2242 // invoking initial read or write while a write is in progress.
2243 scheduleInitialReadWrite();
2244 } else if (errno == EINPROGRESS) {
2245 VLOG(4) << "TFO falling back to connecting";
2246 // A normal sendmsg doesn't return EINPROGRESS, however
2247 // TFO might fallback to connecting if there is no
2249 state_ = StateEnum::CONNECTING;
2251 scheduleConnectTimeout();
2252 registerForConnectEvents();
2253 } catch (const AsyncSocketException& ex) {
2255 WRITE_ERROR, std::make_unique<AsyncSocketException>(ex));
2257 // Let's fake it that no bytes were written and return an errno.
2260 } else if (errno == EOPNOTSUPP) {
2261 // Try falling back to connecting.
2262 VLOG(4) << "TFO not supported";
2263 state_ = StateEnum::CONNECTING;
2265 int ret = socketConnect((const sockaddr*)&addr, len);
2267 // connect succeeded immediately
2268 // Treat this like no data was written.
2269 state_ = StateEnum::ESTABLISHED;
2270 scheduleInitialReadWrite();
2272 // If there was no exception during connections,
2273 // we would return that no bytes were written.
2276 } catch (const AsyncSocketException& ex) {
2278 WRITE_ERROR, std::make_unique<AsyncSocketException>(ex));
2280 } else if (errno == EAGAIN) {
2281 // Normally sendmsg would indicate that the write would block.
2282 // However in the fast open case, it would indicate that sendmsg
2283 // fell back to a connect. This is a return code from connect()
2284 // instead, and is an error condition indicating no fds available.
2287 std::make_unique<AsyncSocketException>(
2288 AsyncSocketException::UNKNOWN, "No more free local ports"));
2291 totalWritten = ::sendmsg(fd, msg, msg_flags);
2293 return WriteResult(totalWritten);
2296 AsyncSocket::WriteResult AsyncSocket::performWrite(
2300 uint32_t* countWritten,
2301 uint32_t* partialWritten) {
2302 // We use sendmsg() instead of writev() so that we can pass in MSG_NOSIGNAL
2303 // We correctly handle EPIPE errors, so we never want to receive SIGPIPE
2304 // (since it may terminate the program if the main program doesn't explicitly
2307 msg.msg_name = nullptr;
2308 msg.msg_namelen = 0;
2309 msg.msg_iov = const_cast<iovec *>(vec);
2310 msg.msg_iovlen = std::min<size_t>(count, kIovMax);
2312 msg.msg_controllen = sendMsgParamCallback_->getAncillaryDataSize(flags);
2313 CHECK_GE(AsyncSocket::SendMsgParamsCallback::maxAncillaryDataSize,
2314 msg.msg_controllen);
2316 if (msg.msg_controllen != 0) {
2317 msg.msg_control = reinterpret_cast<char*>(alloca(msg.msg_controllen));
2318 sendMsgParamCallback_->getAncillaryData(flags, msg.msg_control);
2320 msg.msg_control = nullptr;
2322 int msg_flags = sendMsgParamCallback_->getFlags(flags, zeroCopyEnabled_);
2324 auto writeResult = sendSocketMessage(fd_, &msg, msg_flags);
2325 auto totalWritten = writeResult.writeReturn;
2326 if (totalWritten < 0) {
2327 bool tryAgain = (errno == EAGAIN);
2329 // Apple has a bug where doing a second write on a socket which we
2330 // have opened with TFO causes an ENOTCONN to be thrown. However the
2331 // socket is really connected, so treat ENOTCONN as a EAGAIN until
2332 // this bug is fixed.
2333 tryAgain |= (errno == ENOTCONN);
2336 // workaround for running with zerocopy enabled but without a proper
2337 // memlock value - see ulimit -l
2338 if (zeroCopyEnabled_ && (errno == ENOBUFS)) {
2340 zeroCopyEnabled_ = false;
2343 if (!writeResult.exception && tryAgain) {
2344 // TCP buffer is full; we can't write any more data right now.
2346 *partialWritten = 0;
2347 return WriteResult(0);
2351 *partialWritten = 0;
2355 appBytesWritten_ += totalWritten;
2357 uint32_t bytesWritten;
2359 for (bytesWritten = uint32_t(totalWritten), n = 0; n < count; ++n) {
2360 const iovec* v = vec + n;
2361 if (v->iov_len > bytesWritten) {
2362 // Partial write finished in the middle of this iovec
2364 *partialWritten = bytesWritten;
2365 return WriteResult(totalWritten);
2368 bytesWritten -= uint32_t(v->iov_len);
2371 assert(bytesWritten == 0);
2373 *partialWritten = 0;
2374 return WriteResult(totalWritten);
2378 * Re-register the EventHandler after eventFlags_ has changed.
2380 * If an error occurs, fail() is called to move the socket into the error state
2381 * and call all currently installed callbacks. After an error, the
2382 * AsyncSocket is completely unregistered.
2384 * @return Returns true on success, or false on error.
2386 bool AsyncSocket::updateEventRegistration() {
2387 VLOG(5) << "AsyncSocket::updateEventRegistration(this=" << this
2388 << ", fd=" << fd_ << ", evb=" << eventBase_ << ", state=" << state_
2389 << ", events=" << std::hex << eventFlags_;
2390 eventBase_->dcheckIsInEventBaseThread();
2391 if (eventFlags_ == EventHandler::NONE) {
2392 ioHandler_.unregisterHandler();
2396 // Always register for persistent events, so we don't have to re-register
2397 // after being called back.
2398 if (!ioHandler_.registerHandler(
2399 uint16_t(eventFlags_ | EventHandler::PERSIST))) {
2400 eventFlags_ = EventHandler::NONE; // we're not registered after error
2401 AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
2402 withAddr("failed to update AsyncSocket event registration"));
2403 fail("updateEventRegistration", ex);
2410 bool AsyncSocket::updateEventRegistration(uint16_t enable,
2412 uint16_t oldFlags = eventFlags_;
2413 eventFlags_ |= enable;
2414 eventFlags_ &= ~disable;
2415 if (eventFlags_ == oldFlags) {
2418 return updateEventRegistration();
2422 void AsyncSocket::startFail() {
2423 // startFail() should only be called once
2424 assert(state_ != StateEnum::ERROR);
2425 assert(getDestructorGuardCount() > 0);
2426 state_ = StateEnum::ERROR;
2427 // Ensure that SHUT_READ and SHUT_WRITE are set,
2428 // so all future attempts to read or write will be rejected
2429 shutdownFlags_ |= (SHUT_READ | SHUT_WRITE);
2431 if (eventFlags_ != EventHandler::NONE) {
2432 eventFlags_ = EventHandler::NONE;
2433 ioHandler_.unregisterHandler();
2435 writeTimeout_.cancelTimeout();
2438 ioHandler_.changeHandlerFD(-1);
2443 void AsyncSocket::invokeAllErrors(const AsyncSocketException& ex) {
2444 invokeConnectErr(ex);
2447 if (readCallback_) {
2448 ReadCallback* callback = readCallback_;
2449 readCallback_ = nullptr;
2450 callback->readErr(ex);
2454 void AsyncSocket::finishFail() {
2455 assert(state_ == StateEnum::ERROR);
2456 assert(getDestructorGuardCount() > 0);
2458 AsyncSocketException ex(
2459 AsyncSocketException::INTERNAL_ERROR,
2460 withAddr("socket closing after error"));
2461 invokeAllErrors(ex);
2464 void AsyncSocket::finishFail(const AsyncSocketException& ex) {
2465 assert(state_ == StateEnum::ERROR);
2466 assert(getDestructorGuardCount() > 0);
2467 invokeAllErrors(ex);
2470 void AsyncSocket::fail(const char* fn, const AsyncSocketException& ex) {
2471 VLOG(4) << "AsyncSocket(this=" << this << ", fd=" << fd_ << ", state="
2472 << state_ << " host=" << addr_.describe()
2473 << "): failed in " << fn << "(): "
2479 void AsyncSocket::failConnect(const char* fn, const AsyncSocketException& ex) {
2480 VLOG(5) << "AsyncSocket(this=" << this << ", fd=" << fd_ << ", state="
2481 << state_ << " host=" << addr_.describe()
2482 << "): failed while connecting in " << fn << "(): "
2486 invokeConnectErr(ex);
2490 void AsyncSocket::failRead(const char* fn, const AsyncSocketException& ex) {
2491 VLOG(5) << "AsyncSocket(this=" << this << ", fd=" << fd_ << ", state="
2492 << state_ << " host=" << addr_.describe()
2493 << "): failed while reading in " << fn << "(): "
2497 if (readCallback_ != nullptr) {
2498 ReadCallback* callback = readCallback_;
2499 readCallback_ = nullptr;
2500 callback->readErr(ex);
2506 void AsyncSocket::failErrMessageRead(const char* fn,
2507 const AsyncSocketException& ex) {
2508 VLOG(5) << "AsyncSocket(this=" << this << ", fd=" << fd_ << ", state="
2509 << state_ << " host=" << addr_.describe()
2510 << "): failed while reading message in " << fn << "(): "
2514 if (errMessageCallback_ != nullptr) {
2515 ErrMessageCallback* callback = errMessageCallback_;
2516 errMessageCallback_ = nullptr;
2517 callback->errMessageError(ex);
2523 void AsyncSocket::failWrite(const char* fn, const AsyncSocketException& ex) {
2524 VLOG(5) << "AsyncSocket(this=" << this << ", fd=" << fd_ << ", state="
2525 << state_ << " host=" << addr_.describe()
2526 << "): failed while writing in " << fn << "(): "
2530 // Only invoke the first write callback, since the error occurred while
2531 // writing this request. Let any other pending write callbacks be invoked in
2533 if (writeReqHead_ != nullptr) {
2534 WriteRequest* req = writeReqHead_;
2535 writeReqHead_ = req->getNext();
2536 WriteCallback* callback = req->getCallback();
2537 uint32_t bytesWritten = req->getTotalBytesWritten();
2540 callback->writeErr(bytesWritten, ex);
2547 void AsyncSocket::failWrite(const char* fn, WriteCallback* callback,
2548 size_t bytesWritten,
2549 const AsyncSocketException& ex) {
2550 // This version of failWrite() is used when the failure occurs before
2551 // we've added the callback to writeReqHead_.
2552 VLOG(4) << "AsyncSocket(this=" << this << ", fd=" << fd_ << ", state="
2553 << state_ << " host=" << addr_.describe()
2554 <<"): failed while writing in " << fn << "(): "
2558 if (callback != nullptr) {
2559 callback->writeErr(bytesWritten, ex);
2565 void AsyncSocket::failAllWrites(const AsyncSocketException& ex) {
2566 // Invoke writeError() on all write callbacks.
2567 // This is used when writes are forcibly shutdown with write requests
2568 // pending, or when an error occurs with writes pending.
2569 while (writeReqHead_ != nullptr) {
2570 WriteRequest* req = writeReqHead_;
2571 writeReqHead_ = req->getNext();
2572 WriteCallback* callback = req->getCallback();
2574 callback->writeErr(req->getTotalBytesWritten(), ex);
2580 void AsyncSocket::invalidState(ConnectCallback* callback) {
2581 VLOG(5) << "AsyncSocket(this=" << this << ", fd=" << fd_
2582 << "): connect() called in invalid state " << state_;
2585 * The invalidState() methods don't use the normal failure mechanisms,
2586 * since we don't know what state we are in. We don't want to call
2587 * startFail()/finishFail() recursively if we are already in the middle of
2591 AsyncSocketException ex(AsyncSocketException::ALREADY_OPEN,
2592 "connect() called with socket in invalid state");
2593 connectEndTime_ = std::chrono::steady_clock::now();
2594 if (state_ == StateEnum::CLOSED || state_ == StateEnum::ERROR) {
2596 callback->connectErr(ex);
2599 // We can't use failConnect() here since connectCallback_
2600 // may already be set to another callback. Invoke this ConnectCallback
2601 // here; any other connectCallback_ will be invoked in finishFail()
2604 callback->connectErr(ex);
2610 void AsyncSocket::invalidState(ErrMessageCallback* callback) {
2611 VLOG(4) << "AsyncSocket(this=" << this << ", fd=" << fd_
2612 << "): setErrMessageCB(" << callback
2613 << ") called in invalid state " << state_;
2615 AsyncSocketException ex(
2616 AsyncSocketException::NOT_OPEN,
2617 msgErrQueueSupported
2618 ? "setErrMessageCB() called with socket in invalid state"
2619 : "This platform does not support socket error message notifications");
2620 if (state_ == StateEnum::CLOSED || state_ == StateEnum::ERROR) {
2622 callback->errMessageError(ex);
2627 callback->errMessageError(ex);
2633 void AsyncSocket::invokeConnectErr(const AsyncSocketException& ex) {
2634 connectEndTime_ = std::chrono::steady_clock::now();
2635 if (connectCallback_) {
2636 ConnectCallback* callback = connectCallback_;
2637 connectCallback_ = nullptr;
2638 callback->connectErr(ex);
2642 void AsyncSocket::invokeConnectSuccess() {
2643 connectEndTime_ = std::chrono::steady_clock::now();
2644 if (connectCallback_) {
2645 ConnectCallback* callback = connectCallback_;
2646 connectCallback_ = nullptr;
2647 callback->connectSuccess();
2651 void AsyncSocket::invalidState(ReadCallback* callback) {
2652 VLOG(4) << "AsyncSocket(this=" << this << ", fd=" << fd_
2653 << "): setReadCallback(" << callback
2654 << ") called in invalid state " << state_;
2656 AsyncSocketException ex(AsyncSocketException::NOT_OPEN,
2657 "setReadCallback() called with socket in "
2659 if (state_ == StateEnum::CLOSED || state_ == StateEnum::ERROR) {
2661 callback->readErr(ex);
2666 callback->readErr(ex);
2672 void AsyncSocket::invalidState(WriteCallback* callback) {
2673 VLOG(4) << "AsyncSocket(this=" << this << ", fd=" << fd_
2674 << "): write() called in invalid state " << state_;
2676 AsyncSocketException ex(AsyncSocketException::NOT_OPEN,
2677 withAddr("write() called with socket in invalid state"));
2678 if (state_ == StateEnum::CLOSED || state_ == StateEnum::ERROR) {
2680 callback->writeErr(0, ex);
2685 callback->writeErr(0, ex);
2691 void AsyncSocket::doClose() {
2695 if (const auto shutdownSocketSet = wShutdownSocketSet_.lock()) {
2696 shutdownSocketSet->close(fd_);
2703 std::ostream& operator << (std::ostream& os,
2704 const AsyncSocket::StateEnum& state) {
2705 os << static_cast<int>(state);
2709 std::string AsyncSocket::withAddr(const std::string& s) {
2710 // Don't use addr_ directly because it may not be initialized
2711 // e.g. if constructed from fd
2712 folly::SocketAddress peer, local;
2714 getPeerAddress(&peer);
2715 getLocalAddress(&local);
2716 } catch (const std::exception&) {
2721 return s + " (peer=" + peer.describe() + ", local=" + local.describe() + ")";
2724 void AsyncSocket::setBufferCallback(BufferCallback* cb) {
2725 bufferCallback_ = cb;
2728 } // namespace folly