/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif

#include <folly/io/IOBuf.h>

#include <folly/Conv.h>
#include <folly/Likely.h>
#include <folly/Malloc.h>
#include <folly/Memory.h>
#include <folly/ScopeGuard.h>
#include <folly/SpookyHashV2.h>
#include <folly/io/Cursor.h>

#include <stdexcept>

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

using std::unique_ptr;

namespace folly {

namespace {

enum : uint16_t {
  kHeapMagic = 0xa5a5,
  // This memory segment contains an IOBuf that is still in use
  kIOBufInUse = 0x01,
  // This memory segment contains buffer data that is still in use
  kDataInUse = 0x02,
};

enum : uint64_t {
  // When create() is called for buffers less than kDefaultCombinedBufSize,
  // we allocate a single combined memory segment for the IOBuf and the data
  // together.  See the comments for createCombined()/createSeparate() for more
  // details.
  //
  // (The size of 1k is largely just a guess here.  We could probably do
  // benchmarks of real applications to see if adjusting this number makes a
  // difference.  Callers that know their exact use case can also explicitly
  // call createCombined() or createSeparate().)
  kDefaultCombinedBufSize = 1024
};

// Helper function for IOBuf::takeOwnership()
void takeOwnershipError(bool freeOnError, void* buf,
                        folly::IOBuf::FreeFunction freeFn,
                        void* userData) {
  if (!freeOnError) {
    return;
  }
  if (!freeFn) {
    free(buf);
    return;
  }
  try {
    freeFn(buf, userData);
  } catch (...) {
    // The user's free function is not allowed to throw.
    // (We are already in the middle of throwing an exception, so
    // we cannot let this exception go unhandled.)
    abort();
  }
}

} // unnamed namespace

struct IOBuf::HeapPrefix {
  HeapPrefix(uint16_t flg)
      : magic(kHeapMagic),
        flags(flg) {}
  ~HeapPrefix() {
    // Reset magic to 0 on destruction.  This is solely for debugging purposes
    // to help catch bugs where someone tries to use HeapStorage after it has
    // been deleted.
    magic = 0;
  }

  uint16_t magic;
  std::atomic<uint16_t> flags;
};

struct IOBuf::HeapStorage {
  HeapPrefix prefix;
  // The IOBuf is last in the HeapStorage object.
  // This way operator new will work even if allocating a subclass of IOBuf
  // that requires more space.
  folly::IOBuf buf;
};

struct IOBuf::HeapFullStorage {
  // Make sure jemalloc allocates from the 64-byte class.  Putting this here
  // because HeapStorage is private so it can't be at namespace level.
  static_assert(sizeof(HeapStorage) <= 64,
                "IOBuf may not grow over 56 bytes!");

  HeapStorage hs;
  SharedInfo shared;
  std::max_align_t align;
};
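
// Layout sketch for a combined allocation (illustrative only; field offsets,
// padding, and the data region's size are platform- and malloc-dependent):
//
//   +------------+-------+------------+------------------------+
//   | HeapPrefix | IOBuf | SharedInfo | buffer data ...        |
//   +------------+-------+------------+------------------------+
//   ^ HeapStorage (prefix + buf)       ^ &storage->align
//
// createCombined() hands out &storage->align as the start of the data buffer
// so that the data region is maximally aligned.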

IOBuf::SharedInfo::SharedInfo()
    : freeFn(nullptr),
      userData(nullptr) {
  // Use relaxed memory ordering here.  Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);
}

IOBuf::SharedInfo::SharedInfo(FreeFunction fn, void* arg)
    : freeFn(fn),
      userData(arg) {
  // Use relaxed memory ordering here.  Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);
}

void* IOBuf::operator new(size_t size) {
  size_t fullSize = offsetof(HeapStorage, buf) + size;
  auto* storage = static_cast<HeapStorage*>(malloc(fullSize));
  // operator new is not allowed to return NULL
  if (UNLIKELY(storage == nullptr)) {
    throw std::bad_alloc();
  }

  new (&storage->prefix) HeapPrefix(kIOBufInUse);
  return &(storage->buf);
}

void* IOBuf::operator new(size_t /* size */, void* ptr) { return ptr; }

void IOBuf::operator delete(void* ptr) {
  auto* storageAddr = static_cast<uint8_t*>(ptr) - offsetof(HeapStorage, buf);
  auto* storage = reinterpret_cast<HeapStorage*>(storageAddr);
  releaseStorage(storage, kIOBufInUse);
}

void IOBuf::releaseStorage(HeapStorage* storage, uint16_t freeFlags) {
  CHECK_EQ(storage->prefix.magic, static_cast<uint16_t>(kHeapMagic));

  // Use relaxed memory order here.  If we are unlucky and happen to get
  // out-of-date data the compare_exchange_weak() call below will catch
  // it and load new data with memory_order_acq_rel.
  auto flags = storage->prefix.flags.load(std::memory_order_relaxed);
  DCHECK_EQ((flags & freeFlags), freeFlags);

  while (true) {
    uint16_t newFlags = (flags & ~freeFlags);
    if (newFlags == 0) {
      // The storage space is now unused.  Free it.
      storage->prefix.HeapPrefix::~HeapPrefix();
      free(storage);
      return;
    }

    // This storage segment still contains portions that are in use.
    // Just clear the flags specified in freeFlags for now.
    auto ret = storage->prefix.flags.compare_exchange_weak(
        flags, newFlags, std::memory_order_acq_rel);
    if (ret) {
      // We successfully updated the flags.
      return;
    }

    // We failed to update the flags.  Some other thread probably updated them
    // and cleared some of the other bits.  Continue around the loop to see if
    // we are the last user now, or if we need to try updating the flags again.
  }
}

void IOBuf::freeInternalBuf(void* /* buf */, void* userData) {
  auto* storage = static_cast<HeapStorage*>(userData);
  releaseStorage(storage, kDataInUse);
}
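
// Lifecycle sketch for a combined allocation (illustrative): createCombined()
// starts with flags = kIOBufInUse | kDataInUse.  Destroying the IOBuf routes
// through operator delete -> releaseStorage(kIOBufInUse); dropping the last
// data reference routes through freeInternalBuf -> releaseStorage(kDataInUse).
// Whichever call clears the final flag frees the whole HeapStorage, so the
// data may safely outlive the original IOBuf (e.g. after a clone).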

IOBuf::IOBuf(CreateOp, uint64_t capacity)
    : next_(this),
      prev_(this),
      data_(nullptr),
      length_(0),
      flagsAndSharedInfo_(0) {
  SharedInfo* info;
  allocExtBuffer(capacity, &buf_, &info, &capacity_);
  setSharedInfo(info);
  data_ = buf_;
}

IOBuf::IOBuf(CopyBufferOp /* op */,
             const void* buf,
             uint64_t size,
             uint64_t headroom,
             uint64_t minTailroom)
    : IOBuf(CREATE, headroom + size + minTailroom) {
  advance(headroom);
  memcpy(writableData(), buf, size);
  append(size);
}

IOBuf::IOBuf(CopyBufferOp op, ByteRange br,
             uint64_t headroom, uint64_t minTailroom)
    : IOBuf(op, br.data(), br.size(), headroom, minTailroom) {
}

unique_ptr<IOBuf> IOBuf::create(uint64_t capacity) {
  // For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer
  // all with a single allocation.
  //
  // We don't do this for larger buffers since it can be wasteful if the user
  // needs to reallocate the buffer but keeps using the same IOBuf object.
  // In this case we can't free the data space until the IOBuf is also
  // destroyed.  Callers can explicitly call createCombined() or
  // createSeparate() if they know their use case better, and know if they are
  // likely to reallocate the buffer later.
  if (capacity <= kDefaultCombinedBufSize) {
    return createCombined(capacity);
  }
  return createSeparate(capacity);
}
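
// Usage sketch (illustrative, not part of the original file): create() applies
// the size heuristic above, while the explicit variants let callers decide.
//
//   auto small = folly::IOBuf::create(512);         // combined allocation
//   auto large = folly::IOBuf::create(64 * 1024);   // separate allocation
//   auto fixed = folly::IOBuf::createCombined(64);  // force one allocation
//   auto grow  = folly::IOBuf::createSeparate(256); // expect reallocation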

unique_ptr<IOBuf> IOBuf::createCombined(uint64_t capacity) {
  // To save a memory allocation, allocate space for the IOBuf object, the
  // SharedInfo struct, and the data itself all with a single call to malloc().
  size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity;
  size_t mallocSize = goodMallocSize(requiredStorage);
  auto* storage = static_cast<HeapFullStorage*>(malloc(mallocSize));

  new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kDataInUse);
  new (&storage->shared) SharedInfo(freeInternalBuf, storage);

  uint8_t* bufAddr = reinterpret_cast<uint8_t*>(&storage->align);
  uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize;
  size_t actualCapacity = storageEnd - bufAddr;
  unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf(
      InternalConstructor(), packFlagsAndSharedInfo(0, &storage->shared),
      bufAddr, actualCapacity, bufAddr, 0));
  return ret;
}

unique_ptr<IOBuf> IOBuf::createSeparate(uint64_t capacity) {
  return make_unique<IOBuf>(CREATE, capacity);
}

unique_ptr<IOBuf> IOBuf::createChain(
    size_t totalCapacity, uint64_t maxBufCapacity) {
  unique_ptr<IOBuf> out = create(
      std::min(totalCapacity, size_t(maxBufCapacity)));
  size_t allocatedCapacity = out->capacity();

  while (allocatedCapacity < totalCapacity) {
    unique_ptr<IOBuf> newBuf = create(
        std::min(totalCapacity - allocatedCapacity, size_t(maxBufCapacity)));
    allocatedCapacity += newBuf->capacity();
    out->prependChain(std::move(newBuf));
  }

  return out;
}
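
// Usage sketch (illustrative): allocate about 1MB of total capacity as a
// chain of buffers of roughly 64KB each.
//
//   auto chain = folly::IOBuf::createChain(1 << 20, 1 << 16);
//   // The chain holds capacity only; nothing has been appended yet, so
//   // chain->computeChainDataLength() == 0.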

IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length,
             FreeFunction freeFn, void* userData,
             bool freeOnError)
    : next_(this),
      prev_(this),
      data_(static_cast<uint8_t*>(buf)),
      buf_(static_cast<uint8_t*>(buf)),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
  try {
    setSharedInfo(new SharedInfo(freeFn, userData));
  } catch (...) {
    takeOwnershipError(freeOnError, buf, freeFn, userData);
    throw;
  }
}

unique_ptr<IOBuf> IOBuf::takeOwnership(void* buf, uint64_t capacity,
                                       uint64_t length,
                                       FreeFunction freeFn,
                                       void* userData,
                                       bool freeOnError) {
  try {
    // TODO: We could allocate the IOBuf object and SharedInfo all in a single
    // memory allocation.  We could use the existing HeapStorage class, and
    // define a new kSharedInfoInUse flag.  We could change our code to call
    // releaseStorage(kFlagFreeSharedInfo) when this flag is set, rather than
    // directly calling delete.
    //
    // Note that we always pass freeOnError as false to the constructor.
    // If the constructor throws we'll handle it below.  (We have to handle
    // allocation failures from make_unique too.)
    return make_unique<IOBuf>(TAKE_OWNERSHIP, buf, capacity, length,
                              freeFn, userData, false);
  } catch (...) {
    takeOwnershipError(freeOnError, buf, freeFn, userData);
    throw;
  }
}
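
// Usage sketch (illustrative): hand an existing malloc'd block to an IOBuf.
// With a null freeFn the buffer is released with free() when the last
// reference goes away; a custom FreeFunction could be supplied instead.
//
//   void* p = malloc(4096);
//   auto buf = folly::IOBuf::takeOwnership(p, 4096, 4096, nullptr, nullptr,
//                                          true /* freeOnError */);
//   // buf->data() == p; do not free p manually anymore.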

IOBuf::IOBuf(WrapBufferOp, const void* buf, uint64_t capacity)
    : IOBuf(InternalConstructor(), 0,
            // We cast away the const-ness of the buffer here.
            // This is okay since IOBuf users must use unshare() to create a
            // copy of this buffer before writing to the buffer.
            static_cast<uint8_t*>(const_cast<void*>(buf)), capacity,
            static_cast<uint8_t*>(const_cast<void*>(buf)), capacity) {
}

IOBuf::IOBuf(WrapBufferOp op, ByteRange br)
    : IOBuf(op, br.data(), br.size()) {
}

unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, uint64_t capacity) {
  return make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);
}
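
// Usage sketch (illustrative): wrap existing memory without copying or taking
// ownership.  The caller must keep the memory alive for the IOBuf's lifetime,
// and the wrapping IOBuf reports length() == capacity() immediately.
//
//   static const char kGreeting[] = "hello";
//   auto wrapped = folly::IOBuf::wrapBuffer(kGreeting, sizeof(kGreeting) - 1);
//   // Writing requires unshare(), which copies the data into owned storage.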

IOBuf::IOBuf() noexcept {
}

IOBuf::IOBuf(IOBuf&& other) noexcept {
  *this = std::move(other);
}

IOBuf::IOBuf(const IOBuf& other) {
  other.cloneInto(*this);
}

IOBuf::IOBuf(InternalConstructor,
             uintptr_t flagsAndSharedInfo,
             uint8_t* buf,
             uint64_t capacity,
             uint8_t* data,
             uint64_t length)
    : next_(this),
      prev_(this),
      data_(data),
      buf_(buf),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(flagsAndSharedInfo) {
  assert(data >= buf);
  assert(data + length <= buf + capacity);
}

IOBuf::~IOBuf() {
  // Destroying an IOBuf destroys the entire chain.
  // Users of IOBuf should only explicitly delete the head of any chain.
  // The other elements in the chain will be automatically destroyed.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  decrementRefcount();
}

IOBuf& IOBuf::operator=(IOBuf&& other) noexcept {
  if (this == &other) {
    return *this;
  }

  // If we are part of a chain, delete the rest of the chain.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  // Decrement our refcount on the current buffer
  decrementRefcount();

  // Take ownership of the other buffer's data
  data_ = other.data_;
  buf_ = other.buf_;
  length_ = other.length_;
  capacity_ = other.capacity_;
  flagsAndSharedInfo_ = other.flagsAndSharedInfo_;
  // Reset other so it is in a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.length_ = 0;
  other.capacity_ = 0;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of the chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move assignment on the head of a chain.)
  if (other.next_ != &other) {
    next_ = other.next_;
    next_->prev_ = this;
    other.next_ = &other;

    prev_ = other.prev_;
    prev_->next_ = this;
    other.prev_ = &other;
  }

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);

  return *this;
}

IOBuf& IOBuf::operator=(const IOBuf& other) {
  if (this != &other) {
    *this = IOBuf(other);
  }
  return *this;
}

bool IOBuf::empty() const {
  const IOBuf* current = this;
  do {
    if (current->length() != 0) {
      return false;
    }
    current = current->next_;
  } while (current != this);
  return true;
}

size_t IOBuf::countChainElements() const {
  size_t numElements = 1;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    ++numElements;
  }
  return numElements;
}

uint64_t IOBuf::computeChainDataLength() const {
  uint64_t fullLength = length_;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    fullLength += current->length_;
  }
  return fullLength;
}

void IOBuf::prependChain(unique_ptr<IOBuf>&& iobuf) {
  // Take ownership of the specified IOBuf
  IOBuf* other = iobuf.release();

  // Remember the pointer to the tail of the other chain
  IOBuf* otherTail = other->prev_;

  // Hook up prev_->next_ to point at the start of the other chain,
  // and other->prev_ to point at prev_
  prev_->next_ = other;
  other->prev_ = prev_;

  // Hook up otherTail->next_ to point at us,
  // and prev_ to point back at otherTail.
  otherTail->next_ = this;
  prev_ = otherTail;
}
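
// Usage sketch (illustrative): chains are circular, and prependChain() inserts
// the other chain immediately before 'this', i.e. at the tail when called on
// the head of a chain.
//
//   auto head = folly::IOBuf::create(64);
//   head->prependChain(folly::IOBuf::create(128));
//   head->prependChain(folly::IOBuf::create(256));
//   // head -> b1 -> b2 -> head; head->countChainElements() == 3.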

unique_ptr<IOBuf> IOBuf::clone() const {
  unique_ptr<IOBuf> ret = make_unique<IOBuf>();
  cloneInto(*ret);
  return ret;
}

unique_ptr<IOBuf> IOBuf::cloneOne() const {
  unique_ptr<IOBuf> ret = make_unique<IOBuf>();
  cloneOneInto(*ret);
  return ret;
}

void IOBuf::cloneInto(IOBuf& other) const {
  IOBuf tmp;
  cloneOneInto(tmp);

  for (IOBuf* current = next_; current != this; current = current->next_) {
    tmp.prependChain(current->cloneOne());
  }

  other = std::move(tmp);
}

void IOBuf::cloneOneInto(IOBuf& other) const {
  SharedInfo* info = sharedInfo();
  if (info) {
    setFlags(kFlagMaybeShared);
  }
  other = IOBuf(InternalConstructor(),
                flagsAndSharedInfo_, buf_, capacity_,
                data_, length_);
  if (info) {
    info->refcount.fetch_add(1, std::memory_order_acq_rel);
  }
}
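
// Usage sketch (illustrative): clones share the underlying buffer and bump
// the SharedInfo refcount rather than copying data.
//
//   auto buf = folly::IOBuf::create(1024);
//   buf->append(16);
//   auto copy = buf->cloneOne();  // no data copy; refcount goes to 2
//   assert(buf->isShared());
//   buf->unshare();               // deep-copies so writes don't affect 'copy'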

void IOBuf::unshareOneSlow() {
  // Allocate a new buffer for the data
  uint8_t* buf;
  SharedInfo* sharedInfo;
  uint64_t actualCapacity;
  allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity);

  // Copy the data.
  // Maintain the same amount of headroom.  Since we maintained the same
  // minimum capacity we also maintain at least the same amount of tailroom.
  uint64_t headlen = headroom();
  memcpy(buf + headlen, data_, length_);

  // Release our reference on the old buffer
  decrementRefcount();
  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared.
  setFlagsAndSharedInfo(0, sharedInfo);

  // Update the buffer pointers to point to the new buffer
  data_ = buf + headlen;
  buf_ = buf;
}

void IOBuf::unshareChained() {
  // unshareChained() should only be called if we are part of a chain of
  // multiple IOBufs.  The caller should have already verified this.
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    if (current->isSharedOne()) {
      // we have to unshare
      break;
    }

    current = current->next_;
    if (current == this) {
      // None of the IOBufs in the chain are shared,
      // so return without doing anything
      return;
    }
  }

  // We have to unshare.  Let coalesceSlow() do the work.
  coalesceSlow();
}

void IOBuf::makeManagedChained() {
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    current->makeManagedOne();
    current = current->next_;
    if (current == this) {
      break;
    }
  }
}

void IOBuf::coalesceSlow() {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs.  The caller should have already verified this.
  DCHECK(isChained());

  // Compute the length of the entire chain
  uint64_t newLength = 0;
  IOBuf* end = this;
  do {
    newLength += end->length_;
    end = end->next_;
  } while (end != this);

  coalesceAndReallocate(newLength, end);
  // We should be the only element left in the chain now
  DCHECK(!isChained());
}

void IOBuf::coalesceSlow(size_t maxLength) {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs.  The caller should have already verified this.
  DCHECK(isChained());
  DCHECK_LT(length_, maxLength);

  // Compute the length of the entire chain
  uint64_t newLength = 0;
  IOBuf* end = this;
  while (true) {
    newLength += end->length_;
    end = end->next_;
    if (newLength >= maxLength) {
      break;
    }
    if (end == this) {
      throw std::overflow_error("attempted to coalesce more data than "
                                "available");
    }
  }

  coalesceAndReallocate(newLength, end);
  // We should have the requested length now
  DCHECK_GE(length_, maxLength);
}
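
// Usage sketch (illustrative): callers typically reach coalesceSlow() through
// coalesce()/gather(), which flatten chained data into one contiguous buffer.
//
//   auto head = folly::IOBuf::createChain(4096, 1024);
//   // ... append data across the chain ...
//   head->coalesce();           // whole chain -> single buffer
//   assert(!head->isChained());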

void IOBuf::coalesceAndReallocate(size_t newHeadroom,
                                  size_t newLength,
                                  IOBuf* end,
                                  size_t newTailroom) {
  uint64_t newCapacity = newLength + newHeadroom + newTailroom;

  // Allocate space for the coalesced buffer.
  // We always convert to an external buffer, even if we happened to be an
  // internal buffer before.
  uint8_t* newBuf;
  SharedInfo* newInfo;
  uint64_t actualCapacity;
  allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity);

  // Copy the data into the new buffer
  uint8_t* newData = newBuf + newHeadroom;
  uint8_t* p = newData;
  IOBuf* current = this;
  size_t remaining = newLength;
  do {
    assert(current->length_ <= remaining);
    remaining -= current->length_;
    memcpy(p, current->data_, current->length_);
    p += current->length_;
    current = current->next_;
  } while (current != end);
  assert(remaining == 0);

  // Point at the new buffer
  decrementRefcount();

  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared.
  setFlagsAndSharedInfo(0, newInfo);

  capacity_ = actualCapacity;
  buf_ = newBuf;
  data_ = newData;
  length_ = newLength;

  // Separate from the rest of our chain.
  // Since we don't store the unique_ptr returned by separateChain(),
  // this will immediately delete the returned subchain.
  if (isChained()) {
    (void)separateChain(next_, current->prev_);
  }
}

void IOBuf::decrementRefcount() {
  // Externally owned buffers don't have a SharedInfo object and aren't managed
  // by the reference count
  SharedInfo* info = sharedInfo();
  if (!info) {
    return;
  }

  // Decrement the refcount
  uint32_t newcnt = info->refcount.fetch_sub(
      1, std::memory_order_acq_rel);
  // Note that fetch_sub() returns the value before we decremented.
  // If it is 1, we were the only remaining user; if it is greater there are
  // still other users.
  if (newcnt > 1) {
    return;
  }

  // We were the last user.  Free the buffer
  freeExtBuffer();

  // Free the SharedInfo if it was allocated separately.
  //
  // This is only used by takeOwnership().
  //
  // To avoid this special case handling in decrementRefcount(), we could have
  // takeOwnership() set a custom freeFn() that calls the user's free function
  // then frees the SharedInfo object.  (This would require that
  // takeOwnership() store the user's free function with its allocated
  // SharedInfo object.)  However, handling this specially with a flag seems
  // like it shouldn't be problematic.
  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  }
}

void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) {
  size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;
  DCHECK_LT(newCapacity, UINT32_MAX);

  // reserveSlow() is dangerous if anyone else is sharing the buffer, as we may
  // reallocate and free the original buffer.  It should only ever be called if
  // we are the only user of the buffer.
  DCHECK(!isSharedOne());

  // We'll need to reallocate the buffer.
  // There are a few options.
  // - If we have enough total room, move the data around in the buffer
  //   and adjust the data_ pointer.
  // - If we're using an internal buffer, we'll switch to an external
  //   buffer with enough headroom and tailroom.
  // - If we have enough headroom (headroom() >= minHeadroom) but not too much
  //   (so we don't waste memory), we can try one of two things, depending on
  //   whether we use jemalloc or not:
  //   - If using jemalloc, we can try to expand in place, avoiding a memcpy()
  //   - If not using jemalloc and we don't have too much to copy,
  //     we'll use realloc() (note that realloc might have to copy
  //     headroom + data + tailroom, see smartRealloc in folly/Malloc.h)
  // - Otherwise, bite the bullet and reallocate.
  if (headroom() + tailroom() >= minHeadroom + minTailroom) {
    uint8_t* newData = writableBuffer() + minHeadroom;
    memmove(newData, data_, length_);
    data_ = newData;
    return;
  }

  size_t newAllocatedCapacity = 0;
  uint8_t* newBuffer = nullptr;
  uint64_t newHeadroom = 0;
  uint64_t oldHeadroom = headroom();

  // If we have a buffer allocated with malloc and we just need more tailroom,
  // try to use realloc()/xallocx() to grow the buffer in place.
  SharedInfo* info = sharedInfo();
  if (info && (info->freeFn == nullptr) && length_ != 0 &&
      oldHeadroom >= minHeadroom) {
    size_t headSlack = oldHeadroom - minHeadroom;
    newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack);
    if (usingJEMalloc()) {
      // We assume that tailroom is more useful and more important than
      // headroom (not least because realloc / xallocx allow us to grow the
      // buffer at the tail, but not at the head)  So, if we have more headroom
      // than we need, we consider that "wasted".  We arbitrarily define "too
      // much" headroom to be 25% of the capacity.
      if (headSlack * 4 <= newCapacity) {
        size_t allocatedCapacity = capacity() + sizeof(SharedInfo);
        void* p = buf_;
        if (allocatedCapacity >= jemallocMinInPlaceExpandable) {
          if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) {
            newBuffer = static_cast<uint8_t*>(p);
            newHeadroom = oldHeadroom;
          }
          // if xallocx failed, do nothing, fall back to malloc/memcpy/free
        }
      }
    } else {  // Not using jemalloc
      size_t copySlack = capacity() - length_;
      if (copySlack * 2 <= length_) {
        void* p = realloc(buf_, newAllocatedCapacity);
        if (UNLIKELY(p == nullptr)) {
          throw std::bad_alloc();
        }
        newBuffer = static_cast<uint8_t*>(p);
        newHeadroom = oldHeadroom;
      }
    }
  }

  // None of the previous reallocation strategies worked (or we're using
  // an internal buffer).  malloc/copy/free.
  if (newBuffer == nullptr) {
    newAllocatedCapacity = goodExtBufferSize(newCapacity);
    void* p = malloc(newAllocatedCapacity);
    if (UNLIKELY(p == nullptr)) {
      throw std::bad_alloc();
    }
    newBuffer = static_cast<uint8_t*>(p);
    memcpy(newBuffer + minHeadroom, data_, length_);
    if (sharedInfo()) {
      freeExtBuffer();
    }
    newHeadroom = minHeadroom;
  }

  uint64_t cap;
  initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap);

  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  }

  setFlagsAndSharedInfo(0, info);
  capacity_ = cap;
  buf_ = newBuffer;
  data_ = newBuffer + newHeadroom;
  // length_ is unchanged
}
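
// Usage sketch (illustrative): reserve() routes here when the current buffer
// cannot satisfy the requested headroom/tailroom without reallocation.
//
//   auto buf = folly::IOBuf::create(64);
//   buf->append(64);
//   buf->reserve(16, 1024);  // ensures headroom() >= 16, tailroom() >= 1024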

void IOBuf::freeExtBuffer() {
  SharedInfo* info = sharedInfo();
  DCHECK(info);

  if (info->freeFn) {
    try {
      info->freeFn(buf_, info->userData);
    } catch (...) {
      // The user's free function should never throw.  Otherwise we might
      // throw from the IOBuf destructor.  Other code paths like coalesce()
      // also assume that decrementRefcount() cannot throw.
      abort();
    }
  } else {
    free(buf_);
  }
}

void IOBuf::allocExtBuffer(uint64_t minCapacity,
                           uint8_t** bufReturn,
                           SharedInfo** infoReturn,
                           uint64_t* capacityReturn) {
  size_t mallocSize = goodExtBufferSize(minCapacity);
  uint8_t* buf = static_cast<uint8_t*>(malloc(mallocSize));
  if (UNLIKELY(buf == nullptr)) {
    throw std::bad_alloc();
  }
  initExtBuffer(buf, mallocSize, infoReturn, capacityReturn);
  *bufReturn = buf;
}

size_t IOBuf::goodExtBufferSize(uint64_t minCapacity) {
  // Determine how much space we should allocate.  We'll store the SharedInfo
  // for the external buffer just after the buffer itself.  (We store it just
  // after the buffer rather than just before so that the code can still just
  // use free(buf_) to free the buffer.)
  size_t minSize = static_cast<size_t>(minCapacity) + sizeof(SharedInfo);
  // Add room for padding so that the SharedInfo will be aligned on an 8-byte
  // boundary.
  minSize = (minSize + 7) & ~7;

  // Use goodMallocSize() to bump up the capacity to a decent size to request
  // from malloc, so we can use all of the space that malloc will probably give
  // us anyway.
  return goodMallocSize(minSize);
}
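
// Worked example (illustrative; sizeof(SharedInfo) and malloc size classes
// are platform-dependent):
//   minCapacity = 1000
//   minSize     = 1000 + sizeof(SharedInfo), rounded up to a multiple of 8
//   mallocSize  = goodMallocSize(minSize)   // e.g. the next jemalloc class
// initExtBuffer() then reports mallocSize - sizeof(SharedInfo) back as the
// usable capacity, so any rounding slack becomes extra tailroom.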

void IOBuf::initExtBuffer(uint8_t* buf, size_t mallocSize,
                          SharedInfo** infoReturn,
                          uint64_t* capacityReturn) {
  // Find the SharedInfo storage at the end of the buffer
  // and construct the SharedInfo.
  uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
  SharedInfo* sharedInfo = new(infoStart) SharedInfo;

  *capacityReturn = infoStart - buf;
  *infoReturn = sharedInfo;
}

fbstring IOBuf::moveToFbString() {
  // malloc-allocated buffers are just fine, everything else needs
  // to be turned into one.
  if (!sharedInfo() ||         // user owned, not ours to give up
      sharedInfo()->freeFn ||  // not malloc()-ed
      headroom() != 0 ||       // malloc()-ed block doesn't start at beginning
      tailroom() == 0 ||       // no room for NUL terminator
      isShared() ||            // shared
      isChained()) {           // chained
    // We might as well get rid of all head and tailroom if we're going
    // to reallocate; we need 1 byte for NUL terminator.
    coalesceAndReallocate(0, computeChainDataLength(), this, 1);
  }

  // Ensure NUL terminated
  *writableTail() = 0;
  fbstring str(reinterpret_cast<char*>(writableData()),
               length(), capacity(),
               AcquireMallocatedString());

  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  }

  // Reset to a state where we can be deleted cleanly
  flagsAndSharedInfo_ = 0;
  buf_ = nullptr;
  clear();
  return str;
}
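
// Usage sketch (illustrative): transfer a buffer's contents into an fbstring,
// avoiding a copy when the storage is already a plain malloc'd block (other
// cases are coalesced/reallocated first, as above).
//
//   auto buf = folly::IOBuf::create(128);
//   memcpy(buf->writableTail(), "hello", 5);
//   buf->append(5);
//   folly::fbstring s = buf->moveToFbString();  // buf is left empty
//   assert(s == "hello");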

IOBuf::Iterator IOBuf::cbegin() const {
  return Iterator(this, this);
}

IOBuf::Iterator IOBuf::cend() const {
  return Iterator(nullptr, nullptr);
}

folly::fbvector<struct iovec> IOBuf::getIov() const {
  folly::fbvector<struct iovec> iov;
  iov.reserve(countChainElements());
  appendToIov(&iov);
  return iov;
}

void IOBuf::appendToIov(folly::fbvector<struct iovec>* iov) const {
  IOBuf const* p = this;
  do {
    // some code can get confused by empty iovs, so skip them
    if (p->length() > 0) {
      iov->push_back({(void*)p->data(), folly::to<size_t>(p->length())});
    }
    p = p->next();
  } while (p != this);
}

size_t IOBuf::fillIov(struct iovec* iov, size_t len) const {
  IOBuf const* p = this;
  size_t i = 0;
  while (i < len) {
    // some code can get confused by empty iovs, so skip them
    if (p->length() > 0) {
      iov[i].iov_base = const_cast<uint8_t*>(p->data());
      iov[i].iov_len = p->length();
      i++;
    }
    p = p->next();
    if (p == this) {
      return i;
    }
  }
  return 0;
}
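
// Usage sketch (illustrative): export the chain for vectored I/O.
//
//   auto iov = buf->getIov();
//   ssize_t n = ::writev(fd, iov.data(), iov.size());  // 'fd' is hypothetical
//
// fillIov() is the fixed-size variant; it returns 0 if the chain has more
// non-empty elements than the caller-provided array can hold.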

size_t IOBufHash::operator()(const IOBuf& buf) const {
  folly::hash::SpookyHashV2 hasher;
  hasher.Init(0, 0);
  io::Cursor cursor(&buf);
  for (;;) {
    auto p = cursor.peek();
    if (p.second == 0) {
      break;
    }
    hasher.Update(p.first, p.second);
    cursor.skip(p.second);
  }
  uint64_t h1;
  uint64_t h2;
  hasher.Final(&h1, &h2);
  return h1;
}

bool IOBufEqual::operator()(const IOBuf& a, const IOBuf& b) const {
  io::Cursor ca(&a);
  io::Cursor cb(&b);
  for (;;) {
    auto pa = ca.peek();
    auto pb = cb.peek();
    if (pa.second == 0 && pb.second == 0) {
      return true;
    } else if (pa.second == 0 || pb.second == 0) {
      return false;
    }
    size_t n = std::min(pa.second, pb.second);
    if (memcmp(pa.first, pb.first, n)) {
      return false;
    }
    ca.skip(n);
    cb.skip(n);
  }
}

} // namespace folly
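
// Usage sketch (illustrative): IOBufHash and IOBufEqual hash and compare the
// logical byte stream of a whole chain, so they are suitable as Hash/KeyEqual
// functors for unordered containers keyed on IOBuf contents.
//
//   folly::IOBufHash hash;
//   folly::IOBufEqual eq;
//   bool same = eq(a, b);  // true iff chains 'a' and 'b' hold the same bytes
//   size_t h = hash(a);    // SpookyHashV2 over all of 'a''s data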