writeFlags |= WriteFlags::CORK;
}
- socket_->adjustZeroCopyFlags(getOps(), getOpCount(), writeFlags);
+ socket_->adjustZeroCopyFlags(writeFlags);
auto writeResult = socket_->performWrite(
getOps(), getOpCount(), writeFlags, &opsWritten_, &partialBytes_);
return false;
}
-void AsyncSocket::setZeroCopyWriteChainThreshold(size_t threshold) {
- zeroCopyWriteChainThreshold_ = threshold;
-}
-
bool AsyncSocket::isZeroCopyRequest(WriteFlags flags) {
return (zeroCopyEnabled_ && isSet(flags, WriteFlags::WRITE_MSG_ZEROCOPY));
}
-void AsyncSocket::adjustZeroCopyFlags(
- folly::IOBuf* buf,
- folly::WriteFlags& flags) {
- if (zeroCopyEnabled_ && zeroCopyWriteChainThreshold_ && buf &&
- buf->isManaged()) {
- if (buf->computeChainDataLength() >= zeroCopyWriteChainThreshold_) {
- flags |= folly::WriteFlags::WRITE_MSG_ZEROCOPY;
- } else {
- flags = unSet(flags, folly::WriteFlags::WRITE_MSG_ZEROCOPY);
- }
- }
-}
-
-void AsyncSocket::adjustZeroCopyFlags(
- const iovec* vec,
- uint32_t count,
- folly::WriteFlags& flags) {
- if (zeroCopyEnabled_ && zeroCopyWriteChainThreshold_) {
- count = std::min<uint32_t>(count, kIovMax);
- size_t sum = 0;
- for (uint32_t i = 0; i < count; ++i) {
- const iovec* v = vec + i;
- sum += v->iov_len;
- }
-
- if (sum >= zeroCopyWriteChainThreshold_) {
- flags |= folly::WriteFlags::WRITE_MSG_ZEROCOPY;
- } else {
- flags = unSet(flags, folly::WriteFlags::WRITE_MSG_ZEROCOPY);
- }
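+// The write-chain size threshold and its IOBuf/iovec-aware overloads are
+// removed; zero-copy is now driven solely by the caller-supplied write flags.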
+void AsyncSocket::adjustZeroCopyFlags(folly::WriteFlags& flags) {
+ if (!zeroCopyEnabled_) {
+ flags = unSet(flags, folly::WriteFlags::WRITE_MSG_ZEROCOPY);
}
}
void AsyncSocket::writeChain(WriteCallback* callback, unique_ptr<IOBuf>&& buf,
WriteFlags flags) {
- adjustZeroCopyFlags(buf.get(), flags);
+ adjustZeroCopyFlags(flags);
constexpr size_t kSmallSizeMax = 64;
size_t count = buf->countChainElements();
// supporting per-socket error queues.
VLOG(5) << "AsyncSocket::handleErrMessages() this=" << this << ", fd=" << fd_
<< ", state=" << state_;
- if (errMessageCallback_ == nullptr &&
- (!zeroCopyEnabled_ || idZeroCopyBufPtrMap_.empty())) {
+ if (errMessageCallback_ == nullptr && idZeroCopyBufPtrMap_.empty()) {
VLOG(7) << "AsyncSocket::handleErrMessages(): "
<< "no callback installed - exiting.";
return;
// this bug is fixed.
tryAgain |= (errno == ENOTCONN);
#endif
+
+  // workaround for running with zero-copy enabled but with an insufficient
+  // memlock limit (see ulimit -l): the kernel returns ENOBUFS, so disable
+  // zero-copy for this socket and retry the write as a regular (copying) send
+ if (zeroCopyEnabled_ && (errno == ENOBUFS)) {
+ tryAgain = true;
+ zeroCopyEnabled_ = false;
+ }
+
if (!writeResult.exception && tryAgain) {
// TCP buffer is full; we can't write any more data right now.
*countWritten = 0;
void setReadCB(ReadCallback* callback) override;
ReadCallback* getReadCallback() const override;
- static const size_t kDefaultZeroCopyThreshold = 0;
-
bool setZeroCopy(bool enable);
bool getZeroCopy() const {
return zeroCopyEnabled_;
}
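+  // With the write-chain threshold removed, callers opt in to zero-copy per
+  // write by passing WriteFlags::WRITE_MSG_ZEROCOPY, e.g. (sketch; sock, cb,
+  // and buf are placeholders):
+  //   sock->setZeroCopy(true);
+  //   sock->writeChain(cb, std::move(buf), WriteFlags::WRITE_MSG_ZEROCOPY);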
- void setZeroCopyWriteChainThreshold(size_t threshold);
- size_t getZeroCopyWriteChainThreshold() const {
- return zeroCopyWriteChainThreshold_;
- }
-
uint32_t getZeroCopyBufId() const {
return zeroCopyBufId_;
}
uint32_t getNextZeroCopyBufId() {
return zeroCopyBufId_++;
}
- void adjustZeroCopyFlags(folly::IOBuf* buf, folly::WriteFlags& flags);
- void adjustZeroCopyFlags(
- const iovec* vec,
- uint32_t count,
- folly::WriteFlags& flags);
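+  // Clears WRITE_MSG_ZEROCOPY from flags when zero-copy is not enabled.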
+ void adjustZeroCopyFlags(folly::WriteFlags& flags);
void addZeroCopyBuf(std::unique_ptr<folly::IOBuf>&& buf);
void addZeroCopyBuf(folly::IOBuf* ptr);
void setZeroCopyBuf(std::unique_ptr<folly::IOBuf>&& buf);
bool trackEor_{false};
bool zeroCopyEnabled_{false};
bool zeroCopyVal_{false};
- size_t zeroCopyWriteChainThreshold_{kDefaultZeroCopyThreshold};
};
#ifdef _MSC_VER
#pragma vtordisp(pop)