/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif

#include <folly/io/IOBuf.h>

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <stdexcept>

#include <folly/Conv.h>
#include <folly/Likely.h>
#include <folly/Memory.h>
#include <folly/ScopeGuard.h>
#include <folly/hash/SpookyHashV2.h>
#include <folly/io/Cursor.h>
#include <folly/lang/Align.h>
#include <folly/memory/Malloc.h>
using std::unique_ptr;

namespace {

enum : uint16_t {
  kHeapMagic = 0xa5a5,
  // This memory segment contains an IOBuf that is still in use
  kIOBufInUse = 0x01,
  // This memory segment contains buffer data that is still in use
  kDataInUse = 0x02,
};

enum : uint64_t {
  // When create() is called for buffers less than kDefaultCombinedBufSize,
  // we allocate a single combined memory segment for the IOBuf and the data
  // together.  See the comments for createCombined()/createSeparate() for more
  // details.
  //
  // (The size of 1k is largely just a guess here.  We could probably do
  // benchmarks of real applications to see if adjusting this number makes a
  // difference.  Callers that know their exact use case can also explicitly
  // call createCombined() or createSeparate().)
  kDefaultCombinedBufSize = 1024
};

// Helper function for IOBuf::takeOwnership()
void takeOwnershipError(bool freeOnError, void* buf,
                        folly::IOBuf::FreeFunction freeFn,
                        void* userData) {
  if (!freeOnError) {
    return;
  }
  if (!freeFn) {
    free(buf);
    return;
  }
  try {
    freeFn(buf, userData);
  } catch (...) {
    // The user's free function is not allowed to throw.
    // (We are already in the middle of throwing an exception, so
    // we cannot let this exception go unhandled.)
    abort();
  }
}

} // namespace

namespace folly {
struct IOBuf::HeapPrefix {
  explicit HeapPrefix(uint16_t flg) : magic(kHeapMagic), flags(flg) {}

  ~HeapPrefix() {
    // Reset magic to 0 on destruction.  This is solely for debugging purposes
    // to help catch bugs where someone tries to use HeapStorage after it has
    // been deleted.
    magic = 0;
  }

  uint16_t magic;
  std::atomic<uint16_t> flags;
};
struct IOBuf::HeapStorage {
  HeapPrefix prefix;
  // The IOBuf is last in the HeapStorage object.
  // This way operator new will work even if allocating a subclass of IOBuf
  // that requires more space.
  folly::IOBuf buf;
};
struct IOBuf::HeapFullStorage {
  // Make sure jemalloc allocates from the 64-byte class.  Putting this here
  // because HeapStorage is private so it can't be at namespace level.
  static_assert(sizeof(HeapStorage) <= 64,
                "IOBuf may not grow over 56 bytes!");

  HeapStorage hs;
  SharedInfo shared;
  folly::max_align_t align;
};
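
// [Editor's note] A rough sketch of the combined allocation that
// createCombined() builds out of HeapFullStorage (offsets illustrative only):
//
//   +------------+----------+------------+---------------------------+
//   | HeapPrefix | IOBuf    | SharedInfo | buffer data ...           |
//   | hs.prefix  | hs.buf   | shared     | starts at &storage->align |
//   +------------+----------+------------+---------------------------+
//
// The whole block comes from a single malloc() and is freed only once both
// kIOBufInUse and kDataInUse have been cleared via releaseStorage().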
IOBuf::SharedInfo::SharedInfo()
    : freeFn(nullptr),
      userData(nullptr) {
  // Use relaxed memory ordering here.  Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);
}

IOBuf::SharedInfo::SharedInfo(FreeFunction fn, void* arg)
    : freeFn(fn),
      userData(arg) {
  // Use relaxed memory ordering here.  Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);
}
void* IOBuf::operator new(size_t size) {
  size_t fullSize = offsetof(HeapStorage, buf) + size;
  auto* storage = static_cast<HeapStorage*>(malloc(fullSize));
  // operator new is not allowed to return nullptr
  if (UNLIKELY(storage == nullptr)) {
    throw std::bad_alloc();
  }

  new (&storage->prefix) HeapPrefix(kIOBufInUse);
  return &(storage->buf);
}

void* IOBuf::operator new(size_t /* size */, void* ptr) { return ptr; }
void IOBuf::operator delete(void* ptr) {
  auto* storageAddr = static_cast<uint8_t*>(ptr) - offsetof(HeapStorage, buf);
  auto* storage = reinterpret_cast<HeapStorage*>(storageAddr);
  releaseStorage(storage, kIOBufInUse);
}
void IOBuf::releaseStorage(HeapStorage* storage, uint16_t freeFlags) {
  CHECK_EQ(storage->prefix.magic, static_cast<uint16_t>(kHeapMagic));

  // If we are unlucky and happen to load out-of-date flags here, the
  // compare_exchange_weak() call below will catch it and reload the
  // current value with memory_order_acq_rel.
  auto flags = storage->prefix.flags.load(std::memory_order_acquire);
  DCHECK_EQ((flags & freeFlags), freeFlags);

  while (true) {
    uint16_t newFlags = uint16_t(flags & ~freeFlags);
    if (newFlags == 0) {
      // The storage space is now unused.  Free it.
      storage->prefix.HeapPrefix::~HeapPrefix();
      free(storage);
      return;
    }

    // This storage segment still contains portions that are in use.
    // Just clear the flags specified in freeFlags for now.
    auto ret = storage->prefix.flags.compare_exchange_weak(
        flags, newFlags, std::memory_order_acq_rel);
    if (ret) {
      // We successfully updated the flags.
      return;
    }

    // We failed to update the flags.  Some other thread probably updated them
    // and cleared some of the other bits.  Continue around the loop to see if
    // we are the last user now, or if we need to try updating the flags again.
  }
}
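
// [Editor's note] A sketch of the two-flag protocol above for a combined
// allocation, assuming the IOBuf is destroyed before its data:
//
//   flags == kIOBufInUse | kDataInUse      initial state
//   operator delete   -> releaseStorage(storage, kIOBufInUse)
//                        flags become kDataInUse; the block stays alive
//   freeInternalBuf() -> releaseStorage(storage, kDataInUse)
//                        flags become 0; HeapPrefix destroyed, block free()d
//
// The compare_exchange_weak() loop makes this safe when the two releases
// race on different threads: whichever clears the last flag frees the block.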
void IOBuf::freeInternalBuf(void* /* buf */, void* userData) {
  auto* storage = static_cast<HeapStorage*>(userData);
  releaseStorage(storage, kDataInUse);
}
IOBuf::IOBuf(CreateOp, uint64_t capacity)
    : next_(this),
      prev_(this),
      data_(nullptr),
      length_(0),
      flagsAndSharedInfo_(0) {
  SharedInfo* info;
  allocExtBuffer(capacity, &buf_, &info, &capacity_);
  setSharedInfo(info);
  data_ = buf_;
}
IOBuf::IOBuf(CopyBufferOp /* op */,
             const void* buf,
             uint64_t size,
             uint64_t headroom,
             uint64_t minTailroom)
    : IOBuf(CREATE, headroom + size + minTailroom) {
  advance(headroom);
  if (size > 0) {
    assert(buf != nullptr);
    memcpy(writableData(), buf, size);
    append(size);
  }
}
IOBuf::IOBuf(CopyBufferOp op, ByteRange br,
             uint64_t headroom, uint64_t minTailroom)
    : IOBuf(op, br.data(), br.size(), headroom, minTailroom) {
}
unique_ptr<IOBuf> IOBuf::create(uint64_t capacity) {
  // For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer
  // all with a single allocation.
  //
  // We don't do this for larger buffers since it can be wasteful if the user
  // needs to reallocate the buffer but keeps using the same IOBuf object.
  // In this case we can't free the data space until the IOBuf is also
  // destroyed.  Callers can explicitly call createCombined() or
  // createSeparate() if they know their use case better, and know if they are
  // likely to reallocate the buffer later.
  if (capacity <= kDefaultCombinedBufSize) {
    return createCombined(capacity);
  }
  return createSeparate(capacity);
}
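
// [Editor's sketch] Typical usage; under the default threshold both calls
// behave identically apart from the allocation layout chosen:
//
//   auto small = folly::IOBuf::create(100);   // combined: one malloc()
//   auto large = folly::IOBuf::create(4096);  // separate IOBuf and data
//   memcpy(small->writableTail(), "hi", 2);
//   small->append(2);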
unique_ptr<IOBuf> IOBuf::createCombined(uint64_t capacity) {
  // To save a memory allocation, allocate space for the IOBuf object, the
  // SharedInfo struct, and the data itself all with a single call to malloc().
  size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity;
  size_t mallocSize = goodMallocSize(requiredStorage);
  auto* storage = static_cast<HeapFullStorage*>(malloc(mallocSize));
  // As with operator new above, we cannot return nullptr from here.
  if (UNLIKELY(storage == nullptr)) {
    throw std::bad_alloc();
  }

  new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kDataInUse);
  new (&storage->shared) SharedInfo(freeInternalBuf, storage);

  uint8_t* bufAddr = reinterpret_cast<uint8_t*>(&storage->align);
  uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize;
  size_t actualCapacity = size_t(storageEnd - bufAddr);
  unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf(
      InternalConstructor(), packFlagsAndSharedInfo(0, &storage->shared),
      bufAddr, actualCapacity, bufAddr, 0));
  return ret;
}
unique_ptr<IOBuf> IOBuf::createSeparate(uint64_t capacity) {
  return std::make_unique<IOBuf>(CREATE, capacity);
}
unique_ptr<IOBuf> IOBuf::createChain(
    size_t totalCapacity, uint64_t maxBufCapacity) {
  unique_ptr<IOBuf> out = create(
      std::min(totalCapacity, size_t(maxBufCapacity)));
  size_t allocatedCapacity = out->capacity();

  while (allocatedCapacity < totalCapacity) {
    unique_ptr<IOBuf> newBuf = create(
        std::min(totalCapacity - allocatedCapacity, size_t(maxBufCapacity)));
    allocatedCapacity += newBuf->capacity();
    out->prependChain(std::move(newBuf));
  }

  return out;
}
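
// [Editor's sketch] For example, createChain(10000, 4096) builds a chain
// whose capacities sum to at least 10000 bytes; each element requests at
// most 4096 bytes, though goodMallocSize() rounding can make the actual
// per-element capacity (and thus the element count) vary by allocator:
//
//   auto chain = folly::IOBuf::createChain(10000, 4096);
//   assert(chain->isChained());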
IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length,
             FreeFunction freeFn, void* userData,
             bool freeOnError)
    : next_(this),
      prev_(this),
      data_(static_cast<uint8_t*>(buf)),
      buf_(static_cast<uint8_t*>(buf)),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
  try {
    setSharedInfo(new SharedInfo(freeFn, userData));
  } catch (...) {
    takeOwnershipError(freeOnError, buf, freeFn, userData);
    throw;
  }
}
unique_ptr<IOBuf> IOBuf::takeOwnership(void* buf, uint64_t capacity,
                                       uint64_t length,
                                       FreeFunction freeFn,
                                       void* userData,
                                       bool freeOnError) {
  try {
    // TODO: We could allocate the IOBuf object and SharedInfo all in a single
    // memory allocation.  We could use the existing HeapStorage class, and
    // define a new kSharedInfoInUse flag.  We could change our code to call
    // releaseStorage() when kFlagFreeSharedInfo is set, rather than directly
    // calling delete.
    //
    // Note that we always pass freeOnError as false to the constructor.
    // If the constructor throws we'll handle it below.  (We have to handle
    // allocation failures from std::make_unique too.)
    return std::make_unique<IOBuf>(
        TAKE_OWNERSHIP, buf, capacity, length, freeFn, userData, false);
  } catch (...) {
    takeOwnershipError(freeOnError, buf, freeFn, userData);
    throw;
  }
}
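
// [Editor's sketch] Handing a heap buffer to an IOBuf; with no freeFn the
// buffer is released with free() when the last reference goes away, and a
// custom free function receives userData back when it runs:
//
//   void* raw = malloc(1024);
//   auto owned = folly::IOBuf::takeOwnership(raw, 1024);
//
//   auto freeFn = [](void* p, void* /* userData */) { releaseToPool(p); };
//   auto pooled = folly::IOBuf::takeOwnership(raw2, 1024, 1024, freeFn);
//
// releaseToPool() and raw2 are hypothetical names, not part of this file.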
IOBuf::IOBuf(WrapBufferOp, const void* buf, uint64_t capacity)
    : IOBuf(InternalConstructor(), 0,
            // We cast away the const-ness of the buffer here.
            // This is okay since IOBuf users must use unshare() to create a
            // copy of this buffer before writing to the buffer.
            static_cast<uint8_t*>(const_cast<void*>(buf)), capacity,
            static_cast<uint8_t*>(const_cast<void*>(buf)), capacity) {
}
IOBuf::IOBuf(WrapBufferOp op, ByteRange br)
    : IOBuf(op, br.data(), br.size()) {
}
unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, uint64_t capacity) {
  return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);
}
IOBuf IOBuf::wrapBufferAsValue(const void* buf, uint64_t capacity) {
  return IOBuf(WrapBufferOp::WRAP_BUFFER, buf, capacity);
}
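
// [Editor's sketch] Wrapping memory the caller continues to own; the IOBuf
// never frees it, so `header` must outlive the IOBuf and all of its clones:
//
//   static const char header[] = "HTTP/1.1 200 OK\r\n";
//   auto buf = folly::IOBuf::wrapBuffer(header, sizeof(header) - 1);
//
// Because no SharedInfo exists, isSharedOne() conservatively reports such a
// buffer as shared until makeManaged()/unshare() copies it.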
IOBuf::IOBuf() noexcept {}
IOBuf::IOBuf(IOBuf&& other) noexcept
    : data_(other.data_),
      buf_(other.buf_),
      length_(other.length_),
      capacity_(other.capacity_),
      flagsAndSharedInfo_(other.flagsAndSharedInfo_) {
  // Reset other so it is in a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.length_ = 0;
  other.capacity_ = 0;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of a chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move operations on the head of a chain.)
  if (other.next_ != &other) {
    next_ = other.next_;
    next_->prev_ = this;
    other.next_ = &other;

    prev_ = other.prev_;
    prev_->next_ = this;
    other.prev_ = &other;
  }

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);
}
IOBuf::IOBuf(const IOBuf& other) {
  *this = other.cloneAsValue();
}
IOBuf::IOBuf(InternalConstructor,
             uintptr_t flagsAndSharedInfo,
             uint8_t* buf,
             uint64_t capacity,
             uint8_t* data,
             uint64_t length)
    : next_(this),
      prev_(this),
      data_(data),
      buf_(buf),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(flagsAndSharedInfo) {
  assert(data >= buf);
  assert(data + length <= buf + capacity);
}
IOBuf::~IOBuf() {
  // Destroying an IOBuf destroys the entire chain.
  // Users of IOBuf should only explicitly delete the head of any chain.
  // The other elements in the chain will be automatically destroyed.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  decrementRefcount();
}
IOBuf& IOBuf::operator=(IOBuf&& other) noexcept {
  if (this == &other) {
    return *this;
  }

  // If we are part of a chain, delete the rest of the chain.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  // Decrement our refcount on the current buffer
  decrementRefcount();

  // Take ownership of the other buffer's data
  data_ = other.data_;
  buf_ = other.buf_;
  length_ = other.length_;
  capacity_ = other.capacity_;
  flagsAndSharedInfo_ = other.flagsAndSharedInfo_;
  // Reset other so it is in a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.length_ = 0;
  other.capacity_ = 0;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of a chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move assignment on the head of a chain.)
  if (other.next_ != &other) {
    next_ = other.next_;
    next_->prev_ = this;
    other.next_ = &other;

    prev_ = other.prev_;
    prev_->next_ = this;
    other.prev_ = &other;
  }

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);

  return *this;
}
IOBuf& IOBuf::operator=(const IOBuf& other) {
  if (this != &other) {
    *this = IOBuf(other);
  }
  return *this;
}
bool IOBuf::empty() const {
  const IOBuf* current = this;
  do {
    if (current->length() != 0) {
      return false;
    }
    current = current->next_;
  } while (current != this);
  return true;
}
size_t IOBuf::countChainElements() const {
  size_t numElements = 1;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    ++numElements;
  }
  return numElements;
}
uint64_t IOBuf::computeChainDataLength() const {
  uint64_t fullLength = length_;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    fullLength += current->length_;
  }
  return fullLength;
}
void IOBuf::prependChain(unique_ptr<IOBuf>&& iobuf) {
  // Take ownership of the specified IOBuf
  IOBuf* other = iobuf.release();

  // Remember the pointer to the tail of the other chain
  IOBuf* otherTail = other->prev_;

  // Hook up prev_->next_ to point at the start of the other chain,
  // and other->prev_ to point at prev_
  prev_->next_ = other;
  other->prev_ = prev_;

  // Hook up otherTail->next_ to point at us,
  // and prev_ to point back at otherTail
  otherTail->next_ = this;
  prev_ = otherTail;
}
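
// [Editor's note] Despite its name, prependChain() inserts the other chain
// immediately *before* this element in the circular list, which is the
// logical tail when called on the head of a chain:
//
//   auto head = folly::IOBuf::create(10);
//   head->prependChain(folly::IOBuf::create(10)); // head -> b1
//   head->prependChain(folly::IOBuf::create(10)); // head -> b1 -> b2
//   // head->countChainElements() == 3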
unique_ptr<IOBuf> IOBuf::clone() const {
  return std::make_unique<IOBuf>(cloneAsValue());
}

unique_ptr<IOBuf> IOBuf::cloneOne() const {
  return std::make_unique<IOBuf>(cloneOneAsValue());
}

unique_ptr<IOBuf> IOBuf::cloneCoalesced() const {
  return std::make_unique<IOBuf>(cloneCoalescedAsValue());
}
IOBuf IOBuf::cloneAsValue() const {
  auto tmp = cloneOneAsValue();

  for (IOBuf* current = next_; current != this; current = current->next_) {
    tmp.prependChain(current->cloneOne());
  }

  return tmp;
}
IOBuf IOBuf::cloneOneAsValue() const {
  if (SharedInfo* info = sharedInfo()) {
    setFlags(kFlagMaybeShared);
    info->refcount.fetch_add(1, std::memory_order_acq_rel);
  }
  return IOBuf(
      InternalConstructor(),
      flagsAndSharedInfo_,
      buf_,
      capacity_,
      data_,
      length_);
}
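
// [Editor's sketch] A clone shares the underlying buffer instead of copying
// it; both handles observe the same bytes until one of them is unshared:
//
//   auto a = folly::IOBuf::create(64);
//   a->append(64);
//   auto b = a->cloneOne();  // bumps the refcount, sets kFlagMaybeShared
//   assert(a->isSharedOne() && b->isSharedOne());
//   b->unshare();            // b now owns a private copy of the bytes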
IOBuf IOBuf::cloneCoalescedAsValue() const {
  if (!isChained()) {
    return cloneOneAsValue();
  }
  // Coalesce into newBuf
  const uint64_t newLength = computeChainDataLength();
  const uint64_t newHeadroom = headroom();
  const uint64_t newTailroom = prev()->tailroom();
  const uint64_t newCapacity = newLength + newHeadroom + newTailroom;
  IOBuf newBuf{CREATE, newCapacity};
  newBuf.advance(newHeadroom);

  auto current = this;
  do {
    if (current->length() > 0) {
      DCHECK_NOTNULL(current->data());
      DCHECK_LE(current->length(), newBuf.tailroom());
      memcpy(newBuf.writableTail(), current->data(), current->length());
      newBuf.append(current->length());
    }
    current = current->next();
  } while (current != this);

  DCHECK_EQ(newLength, newBuf.length());
  DCHECK_EQ(newHeadroom, newBuf.headroom());
  DCHECK_LE(newTailroom, newBuf.tailroom());

  return newBuf;
}
void IOBuf::unshareOneSlow() {
  // Allocate a new buffer for the data
  uint8_t* buf;
  SharedInfo* sharedInfo;
  uint64_t actualCapacity;
  allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity);

  // Copy the data.
  // Maintain the same amount of headroom.  Since we maintained the same
  // minimum capacity we also maintain at least the same amount of tailroom.
  uint64_t headlen = headroom();
  if (length_ > 0) {
    assert(data_ != nullptr);
    memcpy(buf + headlen, data_, length_);
  }

  // Release our reference on the old buffer
  decrementRefcount();
  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared.
  setFlagsAndSharedInfo(0, sharedInfo);

  // Update the buffer pointers to point to the new buffer
  data_ = buf + headlen;
  buf_ = buf;
}
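
// [Editor's note] unshareOneSlow() is the copy-on-write step behind
// unshare(): writing through writableData() while another clone can see the
// buffer would corrupt it, so callers do, e.g.:
//
//   buf->unshare();  // copies only if the buffer is actually shared
//   memset(buf->writableData(), 0, buf->length());  // now safe to write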
void IOBuf::unshareChained() {
  // unshareChained() should only be called if we are part of a chain of
  // multiple IOBufs.  The caller should have already verified this.
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    if (current->isSharedOne()) {
      // We have to unshare.
      break;
    }

    current = current->next_;
    if (current == this) {
      // None of the IOBufs in the chain are shared,
      // so return without doing anything.
      return;
    }
  }

  // We have to unshare.  Let coalesceSlow() do the work.
  coalesceSlow();
}
void IOBuf::markExternallyShared() {
  IOBuf* current = this;
  do {
    current->markExternallySharedOne();
    current = current->next_;
  } while (current != this);
}
void IOBuf::makeManagedChained() {
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    current->makeManagedOne();
    current = current->next_;
    if (current == this) {
      break;
    }
  }
}
void IOBuf::coalesceSlow() {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs.  The caller should have already verified this.
  DCHECK(isChained());

  // Compute the length of the entire chain
  uint64_t newLength = 0;
  IOBuf* end = this;
  do {
    newLength += end->length_;
    end = end->next_;
  } while (end != this);

  coalesceAndReallocate(newLength, end);
  // We should be the only element left in the chain now
  DCHECK(!isChained());
}
void IOBuf::coalesceSlow(size_t maxLength) {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs.  The caller should have already verified this.
  DCHECK(isChained());
  DCHECK_LT(length_, maxLength);

  // Compute the length of the entire chain
  uint64_t newLength = 0;
  IOBuf* end = this;
  while (true) {
    newLength += end->length_;
    end = end->next_;
    if (newLength >= maxLength) {
      break;
    }
    if (end == this) {
      throw std::overflow_error("attempted to coalesce more data than "
                                "available");
    }
  }

  coalesceAndReallocate(newLength, end);
  // We should have the requested length now
  DCHECK_GE(length_, maxLength);
}
void IOBuf::coalesceAndReallocate(size_t newHeadroom,
                                  size_t newLength,
                                  IOBuf* end,
                                  size_t newTailroom) {
  uint64_t newCapacity = newLength + newHeadroom + newTailroom;

  // Allocate space for the coalesced buffer.
  // We always convert to an external buffer, even if we happened to be an
  // internal buffer before.
  uint8_t* newBuf;
  SharedInfo* newInfo;
  uint64_t actualCapacity;
  allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity);

  // Copy the data into the new buffer
  uint8_t* newData = newBuf + newHeadroom;
  uint8_t* p = newData;
  IOBuf* current = this;
  size_t remaining = newLength;
  do {
    if (current->length_ > 0) {
      assert(current->length_ <= remaining);
      assert(current->data_ != nullptr);
      remaining -= current->length_;
      memcpy(p, current->data_, current->length_);
      p += current->length_;
    }
    current = current->next_;
  } while (current != end);
  assert(remaining == 0);

  // Point at the new buffer
  decrementRefcount();

  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared.
  setFlagsAndSharedInfo(0, newInfo);

  capacity_ = actualCapacity;
  buf_ = newBuf;
  data_ = newData;
  length_ = newLength;

  // Separate from the rest of our chain.
  // Since we don't store the unique_ptr returned by separateChain(),
  // this will immediately delete the returned subchain.
  if (isChained()) {
    (void)separateChain(next_, current->prev_);
  }
}
void IOBuf::decrementRefcount() {
  // Externally owned buffers don't have a SharedInfo object and aren't managed
  // by the reference count.
  SharedInfo* info = sharedInfo();
  if (!info) {
    return;
  }

  // Decrement the refcount
  uint32_t newcnt = info->refcount.fetch_sub(
      1, std::memory_order_acq_rel);
  // Note that fetch_sub() returns the value before we decremented.
  // If it is 1, we were the only remaining user; if it is greater there are
  // still other users.
  if (newcnt > 1) {
    return;
  }

  // We were the last user.  Free the buffer.
  freeExtBuffer();

  // Free the SharedInfo if it was allocated separately.
  //
  // This is only used by takeOwnership().
  //
  // To avoid this special case handling in decrementRefcount(), we could have
  // takeOwnership() set a custom freeFn() that calls the user's free function
  // then frees the SharedInfo object.  (This would require that
  // takeOwnership() store the user's free function with its allocated
  // SharedInfo object.)  However, handling this specially with a flag seems
  // like it shouldn't be problematic.
  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  }
}
void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) {
  size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;
  DCHECK_LT(newCapacity, UINT32_MAX);

  // reserveSlow() is dangerous if anyone else is sharing the buffer, as we may
  // reallocate and free the original buffer.  It should only ever be called if
  // we are the only user of the buffer.
  DCHECK(!isSharedOne());

  // We'll need to reallocate the buffer.
  // There are a few options.
  // - If we have enough total room, move the data around in the buffer
  //   and adjust the data_ pointer.
  // - If we're using an internal buffer, we'll switch to an external
  //   buffer with enough headroom and tailroom.
  // - If we have enough headroom (headroom() >= minHeadroom) but not too much
  //   (so we don't waste memory), we can try one of two things, depending on
  //   whether we use jemalloc or not:
  //   - If using jemalloc, we can try to expand in place, avoiding a memcpy()
  //   - If not using jemalloc and we don't have too much to copy,
  //     we'll use realloc() (note that realloc might have to copy
  //     headroom + data + tailroom; see smartRealloc in folly/memory/Malloc.h)
  // - Otherwise, bite the bullet and reallocate.
  if (headroom() + tailroom() >= minHeadroom + minTailroom) {
    uint8_t* newData = writableBuffer() + minHeadroom;
    memmove(newData, data_, length_);
    data_ = newData;
    return;
  }

  size_t newAllocatedCapacity = 0;
  uint8_t* newBuffer = nullptr;
  uint64_t newHeadroom = 0;
  uint64_t oldHeadroom = headroom();

  // If we have a buffer allocated with malloc and we just need more tailroom,
  // try to use realloc()/xallocx() to grow the buffer in place.
  SharedInfo* info = sharedInfo();
  if (info && (info->freeFn == nullptr) && length_ != 0 &&
      oldHeadroom >= minHeadroom) {
    size_t headSlack = oldHeadroom - minHeadroom;
    newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack);
    if (usingJEMalloc()) {
      // We assume that tailroom is more useful and more important than
      // headroom (not least because realloc / xallocx allow us to grow the
      // buffer at the tail, but not at the head).  So, if we have more
      // headroom than we need, we consider that "wasted".  We arbitrarily
      // define "too much" headroom to be 25% of the capacity.
      if (headSlack * 4 <= newCapacity) {
        size_t allocatedCapacity = capacity() + sizeof(SharedInfo);
        void* p = buf_;
        if (allocatedCapacity >= jemallocMinInPlaceExpandable) {
          if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) {
            newBuffer = static_cast<uint8_t*>(p);
            newHeadroom = oldHeadroom;
          }
          // If xallocx failed, do nothing; fall back to malloc/memcpy/free.
        }
      }
    } else { // Not using jemalloc
      size_t copySlack = capacity() - length_;
      if (copySlack * 2 <= length_) {
        void* p = realloc(buf_, newAllocatedCapacity);
        if (UNLIKELY(p == nullptr)) {
          throw std::bad_alloc();
        }
        newBuffer = static_cast<uint8_t*>(p);
        newHeadroom = oldHeadroom;
      }
    }
  }

  // None of the previous reallocation strategies worked (or we're using
  // an internal buffer).  malloc/copy/free.
  if (newBuffer == nullptr) {
    newAllocatedCapacity = goodExtBufferSize(newCapacity);
    void* p = malloc(newAllocatedCapacity);
    if (UNLIKELY(p == nullptr)) {
      throw std::bad_alloc();
    }
    newBuffer = static_cast<uint8_t*>(p);
    if (length_ > 0) {
      assert(data_ != nullptr);
      memcpy(newBuffer + minHeadroom, data_, length_);
    }
    if (sharedInfo()) {
      freeExtBuffer();
    }
    newHeadroom = minHeadroom;
  }

  uint64_t cap;
  initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap);

  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  }

  setFlagsAndSharedInfo(0, info);
  capacity_ = cap;
  buf_ = newBuffer;
  data_ = newBuffer + newHeadroom;
  // length_ is unchanged
}
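
// [Editor's sketch] reserve() is the public entry point; it only falls
// through to reserveSlow() when the existing headroom/tailroom is too small:
//
//   auto buf = folly::IOBuf::create(16);
//   buf->reserve(0, 1024);  // may reallocate, as described above
//   assert(buf->tailroom() >= 1024);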
void IOBuf::freeExtBuffer() {
  SharedInfo* info = sharedInfo();
  DCHECK(info);

  if (info->freeFn) {
    try {
      info->freeFn(buf_, info->userData);
    } catch (...) {
      // The user's free function should never throw.  Otherwise we might
      // throw from the IOBuf destructor.  Other code paths like coalesce()
      // also assume that decrementRefcount() cannot throw.
      abort();
    }
  } else {
    free(buf_);
  }
}
void IOBuf::allocExtBuffer(uint64_t minCapacity,
                           uint8_t** bufReturn,
                           SharedInfo** infoReturn,
                           uint64_t* capacityReturn) {
  size_t mallocSize = goodExtBufferSize(minCapacity);
  uint8_t* buf = static_cast<uint8_t*>(malloc(mallocSize));
  if (UNLIKELY(buf == nullptr)) {
    throw std::bad_alloc();
  }
  initExtBuffer(buf, mallocSize, infoReturn, capacityReturn);
  *bufReturn = buf;
}
size_t IOBuf::goodExtBufferSize(uint64_t minCapacity) {
  // Determine how much space we should allocate.  We'll store the SharedInfo
  // for the external buffer just after the buffer itself.  (We store it just
  // after the buffer rather than just before so that the code can still just
  // use free(buf_) to free the buffer.)
  size_t minSize = static_cast<size_t>(minCapacity) + sizeof(SharedInfo);
  // Add room for padding so that the SharedInfo will be aligned on an 8-byte
  // boundary.
  minSize = (minSize + 7) & ~7;

  // Use goodMallocSize() to bump up the capacity to a decent size to request
  // from malloc, so we can use all of the space that malloc will probably give
  // us anyway.
  return goodMallocSize(minSize);
}
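
// [Editor's note] A worked example of the rounding above: if minCapacity is
// 100 and sizeof(SharedInfo) is 24 (the exact size is platform dependent),
// minSize starts at 124 and (124 + 7) & ~7 rounds it up to 128, the next
// multiple of 8, before goodMallocSize() rounds it up further.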
void IOBuf::initExtBuffer(uint8_t* buf, size_t mallocSize,
                          SharedInfo** infoReturn,
                          uint64_t* capacityReturn) {
  // Find the SharedInfo storage at the end of the buffer
  // and construct the SharedInfo.
  uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
  SharedInfo* sharedInfo = new (infoStart) SharedInfo;

  *capacityReturn = uint64_t(infoStart - buf);
  *infoReturn = sharedInfo;
}
fbstring IOBuf::moveToFbString() {
  // malloc-allocated buffers are just fine; everything else needs
  // to be turned into one.
  if (!sharedInfo() ||        // user owned, not ours to give up
      sharedInfo()->freeFn || // not malloc()-ed
      headroom() != 0 ||      // malloc()-ed block doesn't start at beginning
      tailroom() == 0 ||      // no room for NUL terminator
      isShared() ||           // shared
      isChained()) {          // chained
    // We might as well get rid of all head and tailroom if we're going
    // to reallocate; we need 1 byte for the NUL terminator.
    coalesceAndReallocate(0, computeChainDataLength(), this, 1);
  }

  // Ensure NUL terminated
  *writableTail() = 0;
  fbstring str(reinterpret_cast<char*>(writableData()),
               length(), capacity(),
               AcquireMallocatedString());

  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  }

  // Reset to a state where we can be deleted cleanly
  flagsAndSharedInfo_ = 0;
  buf_ = nullptr;
  clear();
  return str;
}
IOBuf::Iterator IOBuf::cbegin() const {
  return Iterator(this, this);
}

IOBuf::Iterator IOBuf::cend() const {
  return Iterator(nullptr, nullptr);
}
folly::fbvector<struct iovec> IOBuf::getIov() const {
  folly::fbvector<struct iovec> iov;
  iov.reserve(countChainElements());
  appendToIov(&iov);
  return iov;
}
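
// [Editor's sketch] The iovec view lets a whole chain feed scatter/gather
// I/O without coalescing (fd here is a hypothetical file descriptor):
//
//   auto iov = buf->getIov();
//   ssize_t n = ::writev(fd, iov.data(), static_cast<int>(iov.size()));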
void IOBuf::appendToIov(folly::fbvector<struct iovec>* iov) const {
  IOBuf const* p = this;
  do {
    // Some code can get confused by empty iovs, so skip them.
    if (p->length() > 0) {
      iov->push_back({(void*)p->data(), folly::to<size_t>(p->length())});
    }
    p = p->next();
  } while (p != this);
}
size_t IOBuf::fillIov(struct iovec* iov, size_t len) const {
  IOBuf const* p = this;
  size_t i = 0;
  while (i < len) {
    // Some code can get confused by empty iovs, so skip them.
    if (p->length() > 0) {
      iov[i].iov_base = const_cast<uint8_t*>(p->data());
      iov[i].iov_len = p->length();
      i++;
    }
    p = p->next();
    if (p == this) {
      return i;
    }
  }
  // The entire chain did not fit in the provided iov array.
  return 0;
}
size_t IOBufHash::operator()(const IOBuf& buf) const {
  folly::hash::SpookyHashV2 hasher;
  hasher.Init(0, 0);
  io::Cursor cursor(&buf);
  for (;;) {
    auto b = cursor.peekBytes();
    if (b.empty()) {
      break;
    }
    hasher.Update(b.data(), b.size());
    cursor.skip(b.size());
  }
  uint64_t h1;
  uint64_t h2;
  hasher.Final(&h1, &h2);
  return static_cast<std::size_t>(h1);
}
bool IOBufEqual::operator()(const IOBuf& a, const IOBuf& b) const {
  io::Cursor ca(&a);
  io::Cursor cb(&b);
  for (;;) {
    auto ba = ca.peekBytes();
    auto bb = cb.peekBytes();
    if (ba.empty() && bb.empty()) {
      return true;
    } else if (ba.empty() || bb.empty()) {
      return false;
    }
    size_t n = std::min(ba.size(), bb.size());
    DCHECK_GT(n, 0u);
    if (memcmp(ba.data(), bb.data(), n)) {
      return false;
    }
    ca.skip(n);
    cb.skip(n);
  }
}
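
// [Editor's sketch] These functors let IOBufs be used as keys compared and
// hashed by content rather than by pointer identity, e.g.:
//
//   std::unordered_set<folly::IOBuf, folly::IOBufHash, folly::IOBufEqual> s;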
} // namespace folly