*/
class Appender : public detail::Writable<Appender> {
public:
- Appender(IOBuf* buf, uint32_t growth)
+ Appender(IOBuf* buf, uint64_t growth)
: buffer_(buf),
crtBuf_(buf->prev()),
growth_(growth) {
* Ensure at least n contiguous bytes available to write.
* Postcondition: length() >= n.
*/
- void ensure(uint32_t n) {
+ void ensure(uint64_t n) {
if (LIKELY(length() >= n)) {
return;
}
IOBuf* buffer_;
IOBuf* crtBuf_;
- uint32_t growth_;
+ uint64_t growth_;
};
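// Illustrative usage (editor's sketch, not part of this diff): an Appender
// writes into the tail of an existing chain, growing it by roughly `growth`
// bytes whenever it runs out of tailroom.
//
//   auto head = folly::IOBuf::create(16);
//   folly::io::Appender app(head.get(), 4096);
//   app.ensure(sizeof(uint32_t));   // guarantee contiguous writable space
//   app.writeBE<uint32_t>(42);      // writeBE() is inherited from Writable<>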
class QueueAppender : public detail::Writable<QueueAppender> {
* space in the queue, we grow no more than growth bytes at once
* (unless you call ensure() with a bigger value yourself).
*/
- QueueAppender(IOBufQueue* queue, uint32_t growth) {
+ QueueAppender(IOBufQueue* queue, uint64_t growth) {
reset(queue, growth);
}
- void reset(IOBufQueue* queue, uint32_t growth) {
+ void reset(IOBufQueue* queue, uint64_t growth) {
queue_ = queue;
growth_ = growth;
}
// Ensure at least n contiguous bytes; may allocate more than growth_,
// and throws if the space cannot be obtained.
- void ensure(uint32_t n) { queue_->preallocate(n, growth_); }
+ void ensure(uint64_t n) { queue_->preallocate(n, growth_); }
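// Illustrative usage (editor's sketch, not part of this diff): a
// QueueAppender writes to an IOBufQueue, letting preallocate() grow the
// queue in ~growth-byte buffers. `payload` is assumed to be a
// std::vector<uint8_t> supplied by the caller.
//
//   folly::IOBufQueue queue;
//   folly::io::QueueAppender app(&queue, 4096);
//   app.writeBE<uint32_t>(payload.size());
//   app.push(payload.data(), payload.size());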
template <class T>
typename std::enable_if<std::is_integral<T>::value>::type
kDataInUse = 0x02,
};
-enum : uint32_t {
+enum : uint64_t {
// When create() is called for buffers less than kDefaultCombinedBufSize,
// we allocate a single combined memory segment for the IOBuf and the data
// together. See the comments for createCombined()/createSeparate() for more
};
struct IOBuf::HeapFullStorage {
+ // Make sure jemalloc allocates from the 64-byte class. Putting this here
+ // because HeapStorage is private so it can't be at namespace level.
+ static_assert(sizeof(HeapStorage) <= 64,
+ "IOBuf may not grow over 56 bytes!");
+
HeapStorage hs;
SharedInfo shared;
MaxAlign align;
releaseStorage(storage, kDataInUse);
}
-IOBuf::IOBuf(CreateOp, uint32_t capacity)
+IOBuf::IOBuf(CreateOp, uint64_t capacity)
: next_(this),
prev_(this),
data_(nullptr),
length_(0),
- flags_(0),
- type_(kExtAllocated) {
- allocExtBuffer(capacity, &buf_, &sharedInfo_, &capacity_);
+ flagsAndSharedInfo_(0) {
+ SharedInfo* info;
+ allocExtBuffer(capacity, &buf_, &info, &capacity_);
+ setSharedInfo(info);
data_ = buf_;
}
-IOBuf::IOBuf(CopyBufferOp op, const void* buf, uint32_t size,
- uint32_t headroom, uint32_t minTailroom)
+IOBuf::IOBuf(CopyBufferOp op, const void* buf, uint64_t size,
+ uint64_t headroom, uint64_t minTailroom)
: IOBuf(CREATE, headroom + size + minTailroom) {
advance(headroom);
memcpy(writableData(), buf, size);
}
IOBuf::IOBuf(CopyBufferOp op, ByteRange br,
- uint32_t headroom, uint32_t minTailroom)
+ uint64_t headroom, uint64_t minTailroom)
: IOBuf(op, br.data(), br.size(), headroom, minTailroom) {
}
-unique_ptr<IOBuf> IOBuf::create(uint32_t capacity) {
+unique_ptr<IOBuf> IOBuf::create(uint64_t capacity) {
// For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer
// all with a single allocation.
//
return createSeparate(capacity);
}
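// Illustrative usage (editor's sketch, not part of this diff): most callers
// just use create() and let it pick the allocation strategy; the
// combined/separate variants exist for callers that know their usage pattern.
//
//   auto small = folly::IOBuf::create(64);             // may combine allocations
//   auto big = folly::IOBuf::createSeparate(1 << 16);  // data malloc()-ed alone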
-unique_ptr<IOBuf> IOBuf::createCombined(uint32_t capacity) {
+unique_ptr<IOBuf> IOBuf::createCombined(uint64_t capacity) {
// To save a memory allocation, allocate space for the IOBuf object, the
// SharedInfo struct, and the data itself all with a single call to malloc().
size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity;
uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize;
size_t actualCapacity = storageEnd - bufAddr;
unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf(
- kCombinedAlloc, 0, bufAddr, actualCapacity,
- bufAddr, 0, &storage->shared));
+ InternalConstructor(), packFlagsAndSharedInfo(0, &storage->shared),
+ bufAddr, actualCapacity, bufAddr, 0));
return ret;
}
-unique_ptr<IOBuf> IOBuf::createSeparate(uint32_t capacity) {
+unique_ptr<IOBuf> IOBuf::createSeparate(uint64_t capacity) {
return make_unique<IOBuf>(CREATE, capacity);
}
unique_ptr<IOBuf> IOBuf::createChain(
- size_t totalCapacity, uint32_t maxBufCapacity) {
+ size_t totalCapacity, uint64_t maxBufCapacity) {
unique_ptr<IOBuf> out = create(
std::min(totalCapacity, size_t(maxBufCapacity)));
size_t allocatedCapacity = out->capacity();
return out;
}
-IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint32_t capacity, uint32_t length,
+IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length,
FreeFunction freeFn, void* userData,
bool freeOnError)
: next_(this),
buf_(static_cast<uint8_t*>(buf)),
length_(length),
capacity_(capacity),
- flags_(kFlagFreeSharedInfo),
- type_(kExtUserSupplied) {
+ flagsAndSharedInfo_(packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
try {
- sharedInfo_ = new SharedInfo(freeFn, userData);
+ setSharedInfo(new SharedInfo(freeFn, userData));
} catch (...) {
takeOwnershipError(freeOnError, buf, freeFn, userData);
throw;
}
}
-unique_ptr<IOBuf> IOBuf::takeOwnership(void* buf, uint32_t capacity,
- uint32_t length,
+unique_ptr<IOBuf> IOBuf::takeOwnership(void* buf, uint64_t capacity,
+ uint64_t length,
FreeFunction freeFn,
void* userData,
bool freeOnError) {
}
}
-IOBuf::IOBuf(WrapBufferOp, const void* buf, uint32_t capacity)
- : IOBuf(kExtUserOwned, kFlagUserOwned,
+IOBuf::IOBuf(WrapBufferOp, const void* buf, uint64_t capacity)
+ : IOBuf(InternalConstructor(), 0,
// We cast away the const-ness of the buffer here.
// This is okay since IOBuf users must use unshare() to create a copy
// of this buffer before writing to the buffer.
static_cast<uint8_t*>(const_cast<void*>(buf)), capacity,
- static_cast<uint8_t*>(const_cast<void*>(buf)), capacity,
- nullptr) {
+ static_cast<uint8_t*>(const_cast<void*>(buf)), capacity) {
}
IOBuf::IOBuf(WrapBufferOp op, ByteRange br)
- : IOBuf(op, br.data(), folly::to<uint32_t>(br.size())) {
+ : IOBuf(op, br.data(), br.size()) {
}
-unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, uint32_t capacity) {
+unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, uint64_t capacity) {
return make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);
}
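// Illustrative usage (editor's sketch, not part of this diff): a wrapped
// buffer has no SharedInfo, so it always reports itself as shared and must
// be unshared (copied) before any mutation.
//
//   static const char kBlob[] = "read-only data";
//   auto b = folly::IOBuf::wrapBuffer(kBlob, sizeof(kBlob));
//   b->unshare();                 // copies kBlob into an owned buffer
//   b->writableData()[0] = 'R';   // now safe to write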
*this = std::move(other);
}
-IOBuf::IOBuf(ExtBufTypeEnum type,
- uint32_t flags,
+IOBuf::IOBuf(InternalConstructor,
+ uintptr_t flagsAndSharedInfo,
uint8_t* buf,
- uint32_t capacity,
+ uint64_t capacity,
uint8_t* data,
- uint32_t length,
- SharedInfo* sharedInfo)
+ uint64_t length)
: next_(this),
prev_(this),
data_(data),
buf_(buf),
length_(length),
capacity_(capacity),
- flags_(flags),
- type_(type),
- sharedInfo_(sharedInfo) {
+ flagsAndSharedInfo_(flagsAndSharedInfo) {
assert(data >= buf);
assert(data + length <= buf + capacity);
- assert(static_cast<bool>(flags & kFlagUserOwned) ==
- (sharedInfo == NULL));
}
IOBuf::~IOBuf() {
buf_ = other.buf_;
length_ = other.length_;
capacity_ = other.capacity_;
- flags_ = other.flags_;
- type_ = other.type_;
- sharedInfo_ = other.sharedInfo_;
+ flagsAndSharedInfo_ = other.flagsAndSharedInfo_;
// Reset other so it is a clean state to be destroyed.
other.data_ = nullptr;
other.buf_ = nullptr;
other.length_ = 0;
other.capacity_ = 0;
- other.flags_ = kFlagUserOwned;
- other.type_ = kExtUserOwned;
- other.sharedInfo_ = nullptr;
+ other.flagsAndSharedInfo_ = 0;
// If other was part of the chain, assume ownership of the rest of its chain.
// (It's only valid to perform move assignment on the head of a chain.)
// Sanity check to make sure that other is in a valid state to be destroyed.
DCHECK_EQ(other.prev_, &other);
DCHECK_EQ(other.next_, &other);
- DCHECK(other.flags_ & kFlagUserOwned);
return *this;
}
return true;
}
-uint32_t IOBuf::countChainElements() const {
- uint32_t numElements = 1;
+size_t IOBuf::countChainElements() const {
+ size_t numElements = 1;
for (IOBuf* current = next_; current != this; current = current->next_) {
++numElements;
}
}
void IOBuf::cloneOneInto(IOBuf& other) const {
- if (sharedInfo_) {
- flags_ |= kFlagMaybeShared;
+ SharedInfo* info = sharedInfo();
+ if (info) {
+ setFlags(kFlagMaybeShared);
}
- other = IOBuf(static_cast<ExtBufTypeEnum>(type_),
- flags_, buf_, capacity_,
- data_, length_,
- sharedInfo_);
- if (sharedInfo_) {
- sharedInfo_->refcount.fetch_add(1, std::memory_order_acq_rel);
+ other = IOBuf(InternalConstructor(),
+ flagsAndSharedInfo_, buf_, capacity_,
+ data_, length_);
+ if (info) {
+ info->refcount.fetch_add(1, std::memory_order_acq_rel);
}
}
// Allocate a new buffer for the data
uint8_t* buf;
SharedInfo* sharedInfo;
- uint32_t actualCapacity;
+ uint64_t actualCapacity;
allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity);
// Copy the data
// Maintain the same amount of headroom. Since we maintained the same
// minimum capacity we also maintain at least the same amount of tailroom.
- uint32_t headlen = headroom();
+ uint64_t headlen = headroom();
memcpy(buf + headlen, data_, length_);
// Release our reference on the old buffer
decrementRefcount();
- // Make sure kFlagUserOwned, kFlagMaybeShared, and kFlagFreeSharedInfo
- // are all cleared.
- flags_ = 0;
+  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are both cleared.
+ setFlagsAndSharedInfo(0, sharedInfo);
// Update the buffer pointers to point to the new buffer
data_ = buf + headlen;
buf_ = buf;
- sharedInfo_ = sharedInfo;
}
void IOBuf::unshareChained() {
// internal buffer before.
uint8_t* newBuf;
SharedInfo* newInfo;
- uint32_t actualCapacity;
+ uint64_t actualCapacity;
allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity);
// Copy the data into the new buffer
// Point at the new buffer
decrementRefcount();
- // Make sure kFlagUserOwned, kFlagMaybeShared, and kFlagFreeSharedInfo
- // are all cleared.
- flags_ = 0;
+  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are both cleared.
+ setFlagsAndSharedInfo(0, newInfo);
capacity_ = actualCapacity;
- type_ = kExtAllocated;
buf_ = newBuf;
- sharedInfo_ = newInfo;
data_ = newData;
length_ = newLength;
void IOBuf::decrementRefcount() {
// Externally owned buffers don't have a SharedInfo object and aren't managed
// by the reference count
- if (flags_ & kFlagUserOwned) {
- assert(sharedInfo_ == nullptr);
+ SharedInfo* info = sharedInfo();
+ if (!info) {
return;
}
// Decrement the refcount
- uint32_t newcnt = sharedInfo_->refcount.fetch_sub(
+ uint32_t newcnt = info->refcount.fetch_sub(
1, std::memory_order_acq_rel);
// Note that fetch_sub() returns the value before we decremented.
// If it is 1, we were the only remaining user; if it is greater there are
// takeOwnership() store the user's free function with its allocated
// SharedInfo object.) However, handling this specially with a flag seems
// like it shouldn't be problematic.
- if (flags_ & kFlagFreeSharedInfo) {
- delete sharedInfo_;
+ if (flags() & kFlagFreeSharedInfo) {
+ delete sharedInfo();
}
}
-void IOBuf::reserveSlow(uint32_t minHeadroom, uint32_t minTailroom) {
+void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) {
size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;
DCHECK_LT(newCapacity, UINT32_MAX);
size_t newAllocatedCapacity = goodExtBufferSize(newCapacity);
uint8_t* newBuffer = nullptr;
- uint32_t newHeadroom = 0;
- uint32_t oldHeadroom = headroom();
+ uint64_t newHeadroom = 0;
+ uint64_t oldHeadroom = headroom();
// If we have a buffer allocated with malloc and we just need more tailroom,
// try to use realloc()/rallocm() to grow the buffer in place.
- if ((flags_ & kFlagUserOwned) == 0 && (sharedInfo_->freeFn == nullptr) &&
- length_ != 0 && oldHeadroom >= minHeadroom) {
+ SharedInfo* info = sharedInfo();
+ if (info && (info->freeFn == nullptr) && length_ != 0 &&
+ oldHeadroom >= minHeadroom) {
if (usingJEMalloc()) {
size_t headSlack = oldHeadroom - minHeadroom;
// We assume that tailroom is more useful and more important than
}
newBuffer = static_cast<uint8_t*>(p);
memcpy(newBuffer + minHeadroom, data_, length_);
- if ((flags_ & kFlagUserOwned) == 0) {
+ if (sharedInfo()) {
freeExtBuffer();
}
newHeadroom = minHeadroom;
}
- SharedInfo* info;
- uint32_t cap;
+ uint64_t cap;
initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap);
- if (flags_ & kFlagFreeSharedInfo) {
- delete sharedInfo_;
+ if (flags() & kFlagFreeSharedInfo) {
+ delete sharedInfo();
}
- flags_ = 0;
+ setFlagsAndSharedInfo(0, info);
capacity_ = cap;
- type_ = kExtAllocated;
buf_ = newBuffer;
- sharedInfo_ = info;
data_ = newBuffer + newHeadroom;
// length_ is unchanged
}
void IOBuf::freeExtBuffer() {
- DCHECK((flags_ & kFlagUserOwned) == 0);
+ SharedInfo* info = sharedInfo();
+ DCHECK(info);
- if (sharedInfo_->freeFn) {
+ if (info->freeFn) {
try {
- sharedInfo_->freeFn(buf_, sharedInfo_->userData);
+ info->freeFn(buf_, info->userData);
} catch (...) {
// The user's free function should never throw. Otherwise we might
// throw from the IOBuf destructor. Other code paths like coalesce()
}
}
-void IOBuf::allocExtBuffer(uint32_t minCapacity,
+void IOBuf::allocExtBuffer(uint64_t minCapacity,
uint8_t** bufReturn,
SharedInfo** infoReturn,
- uint32_t* capacityReturn) {
+ uint64_t* capacityReturn) {
size_t mallocSize = goodExtBufferSize(minCapacity);
uint8_t* buf = static_cast<uint8_t*>(malloc(mallocSize));
if (UNLIKELY(buf == NULL)) {
*bufReturn = buf;
}
-size_t IOBuf::goodExtBufferSize(uint32_t minCapacity) {
+size_t IOBuf::goodExtBufferSize(uint64_t minCapacity) {
// Determine how much space we should allocate. We'll store the SharedInfo
// for the external buffer just after the buffer itself. (We store it just
// after the buffer rather than just before so that the code can still just
void IOBuf::initExtBuffer(uint8_t* buf, size_t mallocSize,
SharedInfo** infoReturn,
- uint32_t* capacityReturn) {
+ uint64_t* capacityReturn) {
// Find the SharedInfo storage at the end of the buffer
// and construct the SharedInfo.
uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
SharedInfo* sharedInfo = new(infoStart) SharedInfo;
- size_t actualCapacity = infoStart - buf;
- // On the unlikely possibility that the actual capacity is larger than can
- // fit in a uint32_t after adding room for the refcount and calling
- // goodMallocSize(), truncate downwards if necessary.
- if (actualCapacity >= UINT32_MAX) {
- *capacityReturn = UINT32_MAX;
- } else {
- *capacityReturn = actualCapacity;
- }
-
+ *capacityReturn = infoStart - buf;
*infoReturn = sharedInfo;
}
fbstring IOBuf::moveToFbString() {
// malloc-allocated buffers are just fine, everything else needs
// to be turned into one.
- if ((flags_ & kFlagUserOwned) || // user owned, not ours to give up
- sharedInfo_->freeFn != nullptr || // not malloc()-ed
- headroom() != 0 || // malloc()-ed block doesn't start at beginning
- tailroom() == 0 || // no room for NUL terminator
- isShared() || // shared
- isChained()) { // chained
+ if (!sharedInfo() || // user owned, not ours to give up
+ sharedInfo()->freeFn || // not malloc()-ed
+ headroom() != 0 || // malloc()-ed block doesn't start at beginning
+ tailroom() == 0 || // no room for NUL terminator
+ isShared() || // shared
+ isChained()) { // chained
// We might as well get rid of all head and tailroom if we're going
// to reallocate; we need 1 byte for NUL terminator.
coalesceAndReallocate(0, computeChainDataLength(), this, 1);
length(), capacity(),
AcquireMallocatedString());
- if (flags_ & kFlagFreeSharedInfo) {
- delete sharedInfo_;
+ if (flags() & kFlagFreeSharedInfo) {
+ delete sharedInfo();
}
// Reset to a state where we can be deleted cleanly
- flags_ = kFlagUserOwned;
- sharedInfo_ = nullptr;
+ flagsAndSharedInfo_ = 0;
buf_ = nullptr;
clear();
return str;
*
* Throws std::bad_alloc on error.
*/
- static std::unique_ptr<IOBuf> create(uint32_t capacity);
- IOBuf(CreateOp, uint32_t capacity);
+ static std::unique_ptr<IOBuf> create(uint64_t capacity);
+ IOBuf(CreateOp, uint64_t capacity);
/**
* Create a new IOBuf, using a single memory allocation to allocate space
* IOBuf object itself is also freed. (It can also be slightly wasteful in
* some cases where you clone this IOBuf and then free the original IOBuf.)
*/
- static std::unique_ptr<IOBuf> createCombined(uint32_t capacity);
+ static std::unique_ptr<IOBuf> createCombined(uint64_t capacity);
/**
* Create a new IOBuf, using separate memory allocations for the IOBuf object
* This requires two memory allocations, but saves space in the long run
* if you know that you will need to reallocate the data buffer later.
*/
- static std::unique_ptr<IOBuf> createSeparate(uint32_t capacity);
+ static std::unique_ptr<IOBuf> createSeparate(uint64_t capacity);
/**
* Allocate a new IOBuf chain with the requested total capacity, allocating
* no more than maxBufCapacity to each buffer.
*/
static std::unique_ptr<IOBuf> createChain(
- size_t totalCapacity, uint32_t maxBufCapacity);
+ size_t totalCapacity, uint64_t maxBufCapacity);
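// Illustrative usage (editor's sketch, not part of this diff):
//
//   // ~1 MiB of total capacity, split across IOBufs of at most 64 KiB each.
//   auto chain = folly::IOBuf::createChain(1 << 20, 1 << 16);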
/**
* Create a new IOBuf pointing to an existing data buffer.
* On error, std::bad_alloc will be thrown. If freeOnError is true (the
* default) the buffer will be freed before throwing the error.
*/
- static std::unique_ptr<IOBuf> takeOwnership(void* buf, uint32_t capacity,
+ static std::unique_ptr<IOBuf> takeOwnership(void* buf, uint64_t capacity,
FreeFunction freeFn = nullptr,
void* userData = nullptr,
bool freeOnError = true) {
return takeOwnership(buf, capacity, capacity, freeFn,
userData, freeOnError);
}
- IOBuf(TakeOwnershipOp op, void* buf, uint32_t capacity,
+ IOBuf(TakeOwnershipOp op, void* buf, uint64_t capacity,
FreeFunction freeFn = nullptr, void* userData = nullptr,
bool freeOnError = true)
: IOBuf(op, buf, capacity, capacity, freeFn, userData, freeOnError) {}
- static std::unique_ptr<IOBuf> takeOwnership(void* buf, uint32_t capacity,
- uint32_t length,
+ static std::unique_ptr<IOBuf> takeOwnership(void* buf, uint64_t capacity,
+ uint64_t length,
FreeFunction freeFn = nullptr,
void* userData = nullptr,
bool freeOnError = true);
- IOBuf(TakeOwnershipOp, void* buf, uint32_t capacity, uint32_t length,
+ IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length,
FreeFunction freeFn = nullptr, void* userData = nullptr,
bool freeOnError = true);
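// Illustrative usage (editor's sketch, not part of this diff): adopt an
// existing malloc()-ed buffer; with the default null FreeFunction the
// buffer is released with free() when the last reference drops.
//
//   void* raw = malloc(4096);
//   auto owned = folly::IOBuf::takeOwnership(raw, 4096, /*length=*/0);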
*
* On error, std::bad_alloc will be thrown.
*/
- static std::unique_ptr<IOBuf> wrapBuffer(const void* buf, uint32_t capacity);
+ static std::unique_ptr<IOBuf> wrapBuffer(const void* buf, uint64_t capacity);
static std::unique_ptr<IOBuf> wrapBuffer(ByteRange br) {
- CHECK_LE(br.size(), std::numeric_limits<uint32_t>::max());
return wrapBuffer(br.data(), br.size());
}
- IOBuf(WrapBufferOp op, const void* buf, uint32_t capacity);
+ IOBuf(WrapBufferOp op, const void* buf, uint64_t capacity);
IOBuf(WrapBufferOp op, ByteRange br);
/**
* user-supplied buffer, optionally allocating a given amount of
* headroom and tailroom.
*/
- static std::unique_ptr<IOBuf> copyBuffer(const void* buf, uint32_t size,
- uint32_t headroom=0,
- uint32_t minTailroom=0);
+ static std::unique_ptr<IOBuf> copyBuffer(const void* buf, uint64_t size,
+ uint64_t headroom=0,
+ uint64_t minTailroom=0);
static std::unique_ptr<IOBuf> copyBuffer(ByteRange br,
- uint32_t headroom=0,
- uint32_t minTailroom=0) {
- CHECK_LE(br.size(), std::numeric_limits<uint32_t>::max());
+ uint64_t headroom=0,
+ uint64_t minTailroom=0) {
return copyBuffer(br.data(), br.size(), headroom, minTailroom);
}
- IOBuf(CopyBufferOp op, const void* buf, uint32_t size,
- uint32_t headroom=0, uint32_t minTailroom=0);
+ IOBuf(CopyBufferOp op, const void* buf, uint64_t size,
+ uint64_t headroom=0, uint64_t minTailroom=0);
IOBuf(CopyBufferOp op, ByteRange br,
- uint32_t headroom=0, uint32_t minTailroom=0);
+ uint64_t headroom=0, uint64_t minTailroom=0);
/**
* Convenience function to create a new IOBuf object that copies data from a
* copyBuffer() above, with the size argument of 3.
*/
static std::unique_ptr<IOBuf> copyBuffer(const std::string& buf,
- uint32_t headroom=0,
- uint32_t minTailroom=0);
+ uint64_t headroom=0,
+ uint64_t minTailroom=0);
IOBuf(CopyBufferOp op, const std::string& buf,
- uint32_t headroom=0, uint32_t minTailroom=0)
+ uint64_t headroom=0, uint64_t minTailroom=0)
: IOBuf(op, buf.data(), buf.size(), headroom, minTailroom) {}
/**
* is empty.
*/
static std::unique_ptr<IOBuf> maybeCopyBuffer(const std::string& buf,
- uint32_t headroom=0,
- uint32_t minTailroom=0);
+ uint64_t headroom=0,
+ uint64_t minTailroom=0);
/**
* Convenience function to free a chain of IOBufs held by a unique_ptr.
/**
* Get the data length.
*/
- uint32_t length() const {
+ uint64_t length() const {
return length_;
}
*
* Returns the number of bytes in the buffer before the start of the data.
*/
- uint32_t headroom() const {
+ uint64_t headroom() const {
return data_ - buffer();
}
*
* Returns the number of bytes in the buffer after the end of the data.
*/
- uint32_t tailroom() const {
+ uint64_t tailroom() const {
return bufferEnd() - tail();
}
* This returns the total usable length of the buffer. Use the length()
* method to get the length of the actual valid data in this IOBuf.
*/
- uint32_t capacity() const {
+ uint64_t capacity() const {
return capacity_;
}
* for making sure the buffer is unshared, so it will not affect other IOBufs
* that may be sharing the same underlying buffer.
*/
- void advance(uint32_t amount) {
+ void advance(uint64_t amount) {
// In debug builds, assert if there is a problem.
assert(amount <= tailroom());
* for making sure the buffer is unshared, so it will not affect other IOBufs
* that may be sharing the same underlying buffer.
*/
- void retreat(uint32_t amount) {
+ void retreat(uint64_t amount) {
// In debug builds, assert if there is a problem.
assert(amount <= headroom());
*
* This does not modify any actual data in the buffer.
*/
- void prepend(uint32_t amount) {
+ void prepend(uint64_t amount) {
DCHECK_LE(amount, headroom());
data_ -= amount;
length_ += amount;
*
* This does not modify any actual data in the buffer.
*/
- void append(uint32_t amount) {
+ void append(uint64_t amount) {
DCHECK_LE(amount, tailroom());
length_ += amount;
}
*
* This does not modify any actual data in the buffer.
*/
- void trimStart(uint32_t amount) {
+ void trimStart(uint64_t amount) {
DCHECK_LE(amount, length_);
data_ += amount;
length_ -= amount;
*
* This does not modify any actual data in the buffer.
*/
- void trimEnd(uint32_t amount) {
+ void trimEnd(uint64_t amount) {
DCHECK_LE(amount, length_);
length_ -= amount;
}
* Postcondition: headroom() >= minHeadroom, tailroom() >= minTailroom,
* the data (between data() and data() + length()) is preserved.
*/
- void reserve(uint32_t minHeadroom, uint32_t minTailroom) {
+ void reserve(uint64_t minHeadroom, uint64_t minTailroom) {
// Maybe we don't need to do anything.
if (headroom() >= minHeadroom && tailroom() >= minTailroom) {
return;
* Use isChained() if you just want to check if this IOBuf is part of a chain
* or not.
*/
- uint32_t countChainElements() const;
+ size_t countChainElements() const;
/**
* Get the length of all the data in this IOBuf chain.
* This only checks the current IOBuf, and not other IOBufs in the chain.
*/
bool isSharedOne() const {
- if (LIKELY(flags_ & (kFlagUserOwned | kFlagMaybeShared)) == 0) {
- return false;
- }
-
// If this is a user-owned buffer, it is always considered shared
- if (flags_ & kFlagUserOwned) {
+ if (UNLIKELY(!sharedInfo())) {
return true;
}
+ if (LIKELY(!(flags() & kFlagMaybeShared))) {
+ return false;
+ }
+
// kFlagMaybeShared is set, so we need to check the reference count.
// (Checking the reference count requires an atomic operation, which is why
// we prefer to only check kFlagMaybeShared if possible.)
- DCHECK(flags_ & kFlagMaybeShared);
- bool shared = sharedInfo_->refcount.load(std::memory_order_acquire) > 1;
+ bool shared = sharedInfo()->refcount.load(std::memory_order_acquire) > 1;
if (!shared) {
// we're the last one left
- flags_ &= ~kFlagMaybeShared;
+ clearFlags(kFlagMaybeShared);
}
return shared;
}
* in the chain.
*
* Throws std::bad_alloc on error. On error the IOBuf chain will be
- * unmodified. Throws std::overflow_error if the length of the entire chain
- * larger than can be described by a uint32_t capacity.
+ * unmodified.
*
* Returns ByteRange that points to the data IOBuf stores.
*/
*
* Throws std::bad_alloc or std::overflow_error on error. On error the IOBuf
* chain will be unmodified. Throws std::overflow_error if maxLength is
- * longer than the total chain length, or if the length of the coalesced
- * portion of the chain is larger than can be described by a uint32_t
- * capacity. (Although maxLength is uint32_t, gather() doesn't split
- * buffers, so coalescing whole buffers may result in a capacity that can't
- * be described in uint32_t.
+ * longer than the total chain length.
*
* Upon return, either enough of the chain was coalesced into a contiguous
* region, or the entire chain was coalesced. That is,
* length() >= maxLength || !isChained() is true.
*/
- void gather(uint32_t maxLength) {
+ void gather(uint64_t maxLength) {
if (!isChained() || length_ >= maxLength) {
return;
}
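// Illustrative usage (editor's sketch, not part of this diff): make a
// fixed-size header contiguous without flattening the whole chain.
// kHeaderSize and parseHeader() are assumed here, not part of the API.
//
//   chain->gather(kHeaderSize);
//   parseHeader(chain->data(), kHeaderSize);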
IOBuf& operator=(IOBuf&& other) noexcept;
private:
- enum FlagsEnum : uint32_t {
- kFlagUserOwned = 0x1,
- kFlagFreeSharedInfo = 0x2,
- kFlagMaybeShared = 0x4,
- };
-
- // Values for the type_ field.
- // We currently don't really use this for anything, other than to have it
- // around for debugging purposes. We store it at the moment just because we
- // have the 4 extra bytes that would just be padding otherwise.
- enum ExtBufTypeEnum {
- kExtAllocated = 0,
- kExtUserSupplied = 1,
- kExtUserOwned = 2,
- kCombinedAlloc = 3,
+ enum FlagsEnum : uintptr_t {
+ // Adding any more flags would not work on 32-bit architectures,
+ // as these flags are stashed in the least significant 2 bits of a
+ // max-align-aligned pointer.
+ kFlagFreeSharedInfo = 0x1,
+ kFlagMaybeShared = 0x2,
+ kFlagMask = kFlagFreeSharedInfo | kFlagMaybeShared
};
struct SharedInfo {
* IOBuf. The IOBuf constructor does not automatically increment the
* reference count.
*/
- IOBuf(ExtBufTypeEnum type, uint32_t flags,
- uint8_t* buf, uint32_t capacity,
- uint8_t* data, uint32_t length,
- SharedInfo* sharedInfo);
+ struct InternalConstructor {}; // avoid conflicts
+ IOBuf(InternalConstructor, uintptr_t flagsAndSharedInfo,
+ uint8_t* buf, uint64_t capacity,
+ uint8_t* data, uint64_t length);
void unshareOneSlow();
void unshareChained();
coalesceAndReallocate(headroom(), newLength, end, end->prev_->tailroom());
}
void decrementRefcount();
- void reserveSlow(uint32_t minHeadroom, uint32_t minTailroom);
+ void reserveSlow(uint64_t minHeadroom, uint64_t minTailroom);
void freeExtBuffer();
- static size_t goodExtBufferSize(uint32_t minCapacity);
+ static size_t goodExtBufferSize(uint64_t minCapacity);
static void initExtBuffer(uint8_t* buf, size_t mallocSize,
SharedInfo** infoReturn,
- uint32_t* capacityReturn);
- static void allocExtBuffer(uint32_t minCapacity,
+ uint64_t* capacityReturn);
+ static void allocExtBuffer(uint64_t minCapacity,
uint8_t** bufReturn,
SharedInfo** infoReturn,
- uint32_t* capacityReturn);
+ uint64_t* capacityReturn);
static void releaseStorage(HeapStorage* storage, uint16_t freeFlags);
static void freeInternalBuf(void* buf, void* userData);
*/
uint8_t* data_{nullptr};
uint8_t* buf_{nullptr};
- uint32_t length_{0};
- uint32_t capacity_{0};
- mutable uint32_t flags_{kFlagUserOwned};
- uint32_t type_{kExtUserOwned};
- // SharedInfo may be NULL if kFlagUserOwned is set. It is non-NULL
- // in all other cases.
- SharedInfo* sharedInfo_{nullptr};
+ uint64_t length_{0};
+ uint64_t capacity_{0};
+
+ // Pack flags in least significant 2 bits, sharedInfo in the rest
+ mutable uintptr_t flagsAndSharedInfo_{0};
+
+ static inline uintptr_t packFlagsAndSharedInfo(uintptr_t flags,
+ SharedInfo* info) {
+ uintptr_t uinfo = reinterpret_cast<uintptr_t>(info);
+ DCHECK_EQ(flags & ~kFlagMask, 0);
+ DCHECK_EQ(uinfo & kFlagMask, 0);
+ return flags | uinfo;
+ }
+
+ inline SharedInfo* sharedInfo() const {
+ return reinterpret_cast<SharedInfo*>(flagsAndSharedInfo_ & ~kFlagMask);
+ }
+
+ inline void setSharedInfo(SharedInfo* info) {
+ uintptr_t uinfo = reinterpret_cast<uintptr_t>(info);
+ DCHECK_EQ(uinfo & kFlagMask, 0);
+ flagsAndSharedInfo_ = (flagsAndSharedInfo_ & kFlagMask) | uinfo;
+ }
+
+ inline uintptr_t flags() const {
+ return flagsAndSharedInfo_ & kFlagMask;
+ }
+
+  // Flags may be changed from const methods; e.g. isSharedOne() clears
+  // kFlagMaybeShared once it observes a refcount of one.
+ inline void setFlags(uintptr_t flags) const {
+ DCHECK_EQ(flags & ~kFlagMask, 0);
+ flagsAndSharedInfo_ |= flags;
+ }
+
+ inline void clearFlags(uintptr_t flags) const {
+ DCHECK_EQ(flags & ~kFlagMask, 0);
+ flagsAndSharedInfo_ &= ~flags;
+ }
+
+ inline void setFlagsAndSharedInfo(uintptr_t flags, SharedInfo* info) {
+ flagsAndSharedInfo_ = packFlagsAndSharedInfo(flags, info);
+ }
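// Worked example (editor's sketch, not part of this diff): suppose a
// SharedInfo lives at 0x7f32b40 (max-align allocation keeps the low two
// bits zero). After setFlags(kFlagMaybeShared), flagsAndSharedInfo_ holds
// 0x7f32b42; sharedInfo() masks with ~kFlagMask to recover 0x7f32b40, and
// flags() masks with kFlagMask to recover 0x2.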
struct DeleterBase {
virtual ~DeleterBase() { }
std::unique_ptr<IOBuf>>::type
IOBuf::takeOwnership(UniquePtr&& buf, size_t count) {
size_t size = count * sizeof(typename UniquePtr::element_type);
- DCHECK_LT(size, size_t(std::numeric_limits<uint32_t>::max()));
auto deleter = new UniquePtrDeleter<UniquePtr>(buf.get_deleter());
return takeOwnership(buf.release(),
size,
}
inline std::unique_ptr<IOBuf> IOBuf::copyBuffer(
- const void* data, uint32_t size, uint32_t headroom,
- uint32_t minTailroom) {
- uint32_t capacity = headroom + size + minTailroom;
+ const void* data, uint64_t size, uint64_t headroom,
+ uint64_t minTailroom) {
+ uint64_t capacity = headroom + size + minTailroom;
std::unique_ptr<IOBuf> buf = create(capacity);
buf->advance(headroom);
memcpy(buf->writableData(), data, size);
}
inline std::unique_ptr<IOBuf> IOBuf::copyBuffer(const std::string& buf,
- uint32_t headroom,
- uint32_t minTailroom) {
+ uint64_t headroom,
+ uint64_t minTailroom) {
return copyBuffer(buf.data(), buf.size(), headroom, minTailroom);
}
inline std::unique_ptr<IOBuf> IOBuf::maybeCopyBuffer(const std::string& buf,
- uint32_t headroom,
- uint32_t minTailroom) {
+ uint64_t headroom,
+ uint64_t minTailroom) {
if (buf.empty()) {
return nullptr;
}
using folly::IOBuf;
const size_t MIN_ALLOC_SIZE = 2000;
-const size_t MAX_ALLOC_SIZE = 8000; // Must fit within a uint32_t
+const size_t MAX_ALLOC_SIZE = 8000;
const size_t MAX_PACK_COPY = 4096;
/**
// reduce wastage (the tail's tailroom and the head's headroom) when
// joining two IOBufQueues together.
size_t copyRemaining = MAX_PACK_COPY;
- uint32_t n;
+ uint64_t n;
while (src &&
(n = src->length()) < copyRemaining &&
n < tail->tailroom()) {
return *this;
}
-std::pair<void*, uint32_t>
+std::pair<void*, uint64_t>
IOBufQueue::headroom() {
if (head_) {
return std::make_pair(head_->writableBuffer(), head_->headroom());
}
void
-IOBufQueue::markPrepended(uint32_t n) {
+IOBufQueue::markPrepended(uint64_t n) {
if (n == 0) {
return;
}
}
void
-IOBufQueue::prepend(const void* buf, uint32_t n) {
+IOBufQueue::prepend(const void* buf, uint64_t n) {
auto p = headroom();
if (n > p.second) {
throw std::overflow_error("Not enough room to prepend");
false);
}
IOBuf* last = head_->prev();
- uint32_t copyLen = std::min(len, (size_t)last->tailroom());
+ uint64_t copyLen = std::min(len, (size_t)last->tailroom());
memcpy(last->writableTail(), src, copyLen);
src += copyLen;
last->append(copyLen);
}
void
-IOBufQueue::wrapBuffer(const void* buf, size_t len, uint32_t blockSize) {
+IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) {
auto src = static_cast<const uint8_t*>(buf);
while (len != 0) {
size_t n = std::min(len, size_t(blockSize));
}
}
-pair<void*,uint32_t>
-IOBufQueue::preallocateSlow(uint32_t min, uint32_t newAllocationSize,
- uint32_t max) {
+pair<void*,uint64_t>
+IOBufQueue::preallocateSlow(uint64_t min, uint64_t newAllocationSize,
+ uint64_t max) {
// Allocate a new buffer of the requested max size.
unique_ptr<IOBuf> newBuf(IOBuf::create(std::max(min, newAllocationSize)));
appendToChain(head_, std::move(newBuf), false);
/**
* Return a space to prepend bytes and the amount of headroom available.
*/
- std::pair<void*, uint32_t> headroom();
+ std::pair<void*, uint64_t> headroom();
/**
* Indicate that n bytes from the headroom have been used.
*/
- void markPrepended(uint32_t n);
+ void markPrepended(uint64_t n);
/**
* Prepend an existing range; throws std::overflow_error if not enough
* room.
*/
- void prepend(const void* buf, uint32_t n);
+ void prepend(const void* buf, uint64_t n);
/**
* Add a buffer or buffer chain to the end of this queue. The
* Importantly, this method may be used to wrap buffers larger than 4GB.
*/
void wrapBuffer(const void* buf, size_t len,
- uint32_t blockSize=(1U << 31)); // default block size: 2GB
+ uint64_t blockSize=(1U << 31)); // default block size: 2GB
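// Illustrative usage (editor's sketch, not part of this diff): wrapping a
// 6 GiB mapping with the default blockSize yields three zero-copy IOBufs of
// 2 GiB each. `mmapBase` is assumed to point at the mapping.
//
//   queue.wrapBuffer(mmapBase, 6ULL << 30);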
/**
* Obtain a writable block of contiguous bytes at the end of this
* callback, tell the application how much of the buffer they've
* filled with data.
*/
- std::pair<void*,uint32_t> preallocate(
- uint32_t min, uint32_t newAllocationSize,
- uint32_t max = std::numeric_limits<uint32_t>::max()) {
+ std::pair<void*,uint64_t> preallocate(
+ uint64_t min, uint64_t newAllocationSize,
+ uint64_t max = std::numeric_limits<uint64_t>::max()) {
auto buf = tailBuf();
if (LIKELY(buf && buf->tailroom() >= min)) {
return std::make_pair(buf->writableTail(),
* invoke any other non-const methods on this IOBufQueue between
* the call to preallocate and the call to postallocate().
*/
- void postallocate(uint32_t n) {
+ void postallocate(uint64_t n) {
head_->prev()->append(n);
chainLength_ += n;
}
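// Illustrative usage (editor's sketch, not part of this diff): the usual
// preallocate()/postallocate() pairing for zero-copy reads. `fd` is an
// assumed open descriptor.
//
//   auto range = queue.preallocate(512, 4096);        // >= 512 writable bytes
//   ssize_t n = read(fd, range.first, range.second);
//   if (n > 0) {
//     queue.postallocate(n);                          // commit what was read
//   }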
* Obtain a writable block of n contiguous bytes, allocating more space
* if necessary, and mark it as used. The caller can fill it later.
*/
- void* allocate(uint32_t n) {
+ void* allocate(uint64_t n) {
void* p = preallocate(n, n).first;
postallocate(n);
return p;
IOBuf* buf = head_->prev();
return LIKELY(!buf->isSharedOne()) ? buf : nullptr;
}
- std::pair<void*,uint32_t> preallocateSlow(
- uint32_t min, uint32_t newAllocationSize, uint32_t max);
+ std::pair<void*,uint64_t> preallocateSlow(
+ uint64_t min, uint64_t newAllocationSize, uint64_t max);
static const size_t kChainLengthNotCached = (size_t)-1;
/** Not copyable */