/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif

#include <folly/io/IOBuf.h>

#include <folly/Conv.h>
#include <folly/Likely.h>
#include <folly/Malloc.h>
#include <folly/Memory.h>
#include <folly/ScopeGuard.h>
#include <folly/SpookyHashV2.h>
#include <folly/io/Cursor.h>

#include <stdexcept>

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

using std::unique_ptr;

namespace {

enum : uint16_t {
  kHeapMagic = 0xa5a5,
  // This memory segment contains an IOBuf that is still in use
  kIOBufInUse = 0x01,
  // This memory segment contains buffer data that is still in use
  kDataInUse = 0x02,
};

enum : uint64_t {
  // When create() is called for buffers less than kDefaultCombinedBufSize,
  // we allocate a single combined memory segment for the IOBuf and the data
  // together. See the comments for createCombined()/createSeparate() for more
  // details.
  //
  // (The size of 1k is largely just a guess here. We could probably do
  // benchmarks of real applications to see if adjusting this number makes a
  // difference. Callers that know their exact use case can also explicitly
  // call createCombined() or createSeparate().)
  kDefaultCombinedBufSize = 1024
};

// Helper function for IOBuf::takeOwnership()
void takeOwnershipError(bool freeOnError, void* buf,
                        folly::IOBuf::FreeFunction freeFn,
                        void* userData) {
  if (!freeOnError) {
    return;
  }
  if (!freeFn) {
    free(buf);
    return;
  }
  try {
    freeFn(buf, userData);
  } catch (...) {
    // The user's free function is not allowed to throw.
    // (We are already in the middle of throwing an exception, so
    // we cannot let this exception go unhandled.)
    abort();
  }
}

} // unnamed namespace

namespace folly {

struct IOBuf::HeapPrefix {
  explicit HeapPrefix(uint16_t flg) : magic(kHeapMagic), flags(flg) {}
  ~HeapPrefix() {
    // Reset magic to 0 on destruction. This is solely for debugging purposes
    // to help catch bugs where someone tries to use HeapStorage after it has
    // been deleted.
    magic = 0;
  }

  uint16_t magic;
  std::atomic<uint16_t> flags;
};

struct IOBuf::HeapStorage {
  HeapPrefix prefix;
  // The IOBuf is last in the HeapStorage object.
  // This way operator new will work even if allocating a subclass of IOBuf
  // that requires more space.
  folly::IOBuf buf;
};

struct IOBuf::HeapFullStorage {
  // Make sure jemalloc allocates from the 64-byte class. Putting this here
  // because HeapStorage is private so it can't be at namespace level.
  static_assert(sizeof(HeapStorage) <= 64,
                "IOBuf may not grow over 56 bytes!");

  HeapStorage hs;
  SharedInfo shared;
  std::max_align_t align;
};
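
// Resulting combined layout (illustrative; exact offsets are up to the
// compiler): the HeapPrefix, the IOBuf object, and the SharedInfo all live at
// the front of a single malloc'd block, and createCombined() below hands out
// &storage->align as the start of the data region:
//
//   [ hs.prefix | hs.buf (IOBuf) | shared (SharedInfo) | align ... data ... ]
//   ^storage                                             ^data, extending to
//                                                         the end of the block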

IOBuf::SharedInfo::SharedInfo() : freeFn(nullptr), userData(nullptr) {
  // Use relaxed memory ordering here. Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);
}

IOBuf::SharedInfo::SharedInfo(FreeFunction fn, void* arg)
    : freeFn(fn), userData(arg) {
  // Use relaxed memory ordering here. Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);
}

void* IOBuf::operator new(size_t size) {
  size_t fullSize = offsetof(HeapStorage, buf) + size;
  auto* storage = static_cast<HeapStorage*>(malloc(fullSize));
  // operator new is not allowed to return nullptr
  if (UNLIKELY(storage == nullptr)) {
    throw std::bad_alloc();
  }

  new (&storage->prefix) HeapPrefix(kIOBufInUse);
  return &(storage->buf);
}

void* IOBuf::operator new(size_t /* size */, void* ptr) { return ptr; }

void IOBuf::operator delete(void* ptr) {
  auto* storageAddr = static_cast<uint8_t*>(ptr) - offsetof(HeapStorage, buf);
  auto* storage = reinterpret_cast<HeapStorage*>(storageAddr);
  releaseStorage(storage, kIOBufInUse);
}
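
// Round trip (for orientation): `new IOBuf(...)` above mallocs a HeapStorage,
// tags its prefix with kIOBufInUse, and returns &storage->buf; `delete`
// recovers the HeapStorage address via offsetof() and clears kIOBufInUse in
// releaseStorage(), which frees the segment once no flags remain set.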

void IOBuf::releaseStorage(HeapStorage* storage, uint16_t freeFlags) {
  CHECK_EQ(storage->prefix.magic, static_cast<uint16_t>(kHeapMagic));

  // Use relaxed memory order here. If we are unlucky and happen to get
  // out-of-date data the compare_exchange_weak() call below will catch
  // it and load new data with memory_order_acq_rel.
  auto flags = storage->prefix.flags.load(std::memory_order_relaxed);
  DCHECK_EQ((flags & freeFlags), freeFlags);

  while (true) {
    uint16_t newFlags = uint16_t(flags & ~freeFlags);
    if (newFlags == 0) {
      // The storage space is now unused. Free it.
      storage->prefix.HeapPrefix::~HeapPrefix();
      free(storage);
      return;
    }

    // This storage segment still contains portions that are in use.
    // Just clear the flags specified in freeFlags for now.
    auto ret = storage->prefix.flags.compare_exchange_weak(
        flags, newFlags, std::memory_order_acq_rel);
    if (ret) {
      // We successfully updated the flags.
      return;
    }

    // We failed to update the flags. Some other thread probably updated them
    // and cleared some of the other bits. Continue around the loop to see if
    // we are the last user now, or if we need to try updating the flags again.
  }
}
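
// Example flag lifecycle (illustrative): a createCombined() segment starts
// out tagged (kIOBufInUse | kDataInUse). operator delete clears kIOBufInUse
// and freeInternalBuf() below clears kDataInUse; whichever of the two runs
// second sees the flags reach zero and frees the segment.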

void IOBuf::freeInternalBuf(void* /* buf */, void* userData) {
  auto* storage = static_cast<HeapStorage*>(userData);
  releaseStorage(storage, kDataInUse);
}

IOBuf::IOBuf(CreateOp, uint64_t capacity)
    : next_(this),
      prev_(this),
      data_(nullptr),
      length_(0),
      flagsAndSharedInfo_(0) {
  SharedInfo* info;
  allocExtBuffer(capacity, &buf_, &info, &capacity_);
  setSharedInfo(info);
  data_ = buf_;
}

IOBuf::IOBuf(CopyBufferOp /* op */,
             const void* buf,
             uint64_t size,
             uint64_t headroom,
             uint64_t minTailroom)
    : IOBuf(CREATE, headroom + size + minTailroom) {
  advance(headroom);
  if (size > 0) {
    assert(buf != nullptr);
    memcpy(writableData(), buf, size);
    append(size);
  }
}

IOBuf::IOBuf(CopyBufferOp op, ByteRange br,
             uint64_t headroom, uint64_t minTailroom)
    : IOBuf(op, br.data(), br.size(), headroom, minTailroom) {
}

unique_ptr<IOBuf> IOBuf::create(uint64_t capacity) {
  // For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer
  // all with a single allocation.
  //
  // We don't do this for larger buffers since it can be wasteful if the user
  // needs to reallocate the buffer but keeps using the same IOBuf object.
  // In this case we can't free the data space until the IOBuf is also
  // destroyed. Callers can explicitly call createCombined() or
  // createSeparate() if they know their use case better, and know if they are
  // likely to reallocate the buffer later.
  if (capacity <= kDefaultCombinedBufSize) {
    return createCombined(capacity);
  }
  return createSeparate(capacity);
}
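
// Usage sketch (illustrative): the size heuristic above can be bypassed when
// the caller knows its allocation pattern.
//
//   auto small = folly::IOBuf::create(256);           // combined allocation
//   auto large = folly::IOBuf::createSeparate(8192);  // data block can be
//                                                     // freed before the IOBuf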

unique_ptr<IOBuf> IOBuf::createCombined(uint64_t capacity) {
  // To save a memory allocation, allocate space for the IOBuf object, the
  // SharedInfo struct, and the data itself all with a single call to malloc().
  size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity;
  size_t mallocSize = goodMallocSize(requiredStorage);
  auto* storage = static_cast<HeapFullStorage*>(malloc(mallocSize));

  new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kDataInUse);
  new (&storage->shared) SharedInfo(freeInternalBuf, storage);

  uint8_t* bufAddr = reinterpret_cast<uint8_t*>(&storage->align);
  uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize;
  size_t actualCapacity = size_t(storageEnd - bufAddr);
  unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf(
      InternalConstructor(), packFlagsAndSharedInfo(0, &storage->shared),
      bufAddr, actualCapacity, bufAddr, 0));
  return ret;
}

unique_ptr<IOBuf> IOBuf::createSeparate(uint64_t capacity) {
  return std::make_unique<IOBuf>(CREATE, capacity);
}

unique_ptr<IOBuf> IOBuf::createChain(
    size_t totalCapacity, uint64_t maxBufCapacity) {
  unique_ptr<IOBuf> out = create(
      std::min(totalCapacity, size_t(maxBufCapacity)));
  size_t allocatedCapacity = out->capacity();

  while (allocatedCapacity < totalCapacity) {
    unique_ptr<IOBuf> newBuf = create(
        std::min(totalCapacity - allocatedCapacity, size_t(maxBufCapacity)));
    allocatedCapacity += newBuf->capacity();
    out->prependChain(std::move(newBuf));
  }

  return out;
}
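
// Usage sketch (illustrative): build roughly 1 MB of capacity out of buffers
// of at most 64 KB each, then walk the resulting chain:
//
//   auto chain = folly::IOBuf::createChain(1 << 20, 1 << 16);
//   size_t n = chain->countChainElements();  // about 16, possibly fewer,
//                                            // since capacities round up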

IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length,
             FreeFunction freeFn, void* userData,
             bool freeOnError)
    : next_(this),
      prev_(this),
      data_(static_cast<uint8_t*>(buf)),
      buf_(static_cast<uint8_t*>(buf)),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
  try {
    setSharedInfo(new SharedInfo(freeFn, userData));
  } catch (...) {
    takeOwnershipError(freeOnError, buf, freeFn, userData);
    throw;
  }
}

unique_ptr<IOBuf> IOBuf::takeOwnership(void* buf, uint64_t capacity,
                                       uint64_t length,
                                       FreeFunction freeFn,
                                       void* userData,
                                       bool freeOnError) {
  try {
    // TODO: We could allocate the IOBuf object and SharedInfo all in a single
    // memory allocation. We could use the existing HeapStorage class, and
    // define a new kSharedInfoInUse flag. We could change our code to call
    // releaseStorage(kFlagFreeSharedInfo) when that flag is set, rather than
    // directly calling delete.
    //
    // Note that we always pass freeOnError as false to the constructor.
    // If the constructor throws we'll handle it below. (We have to handle
    // allocation failures from std::make_unique too.)
    return std::make_unique<IOBuf>(
        TAKE_OWNERSHIP, buf, capacity, length, freeFn, userData, false);
  } catch (...) {
    takeOwnershipError(freeOnError, buf, freeFn, userData);
    throw;
  }
}
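
// Usage sketch (illustrative): hand an existing malloc'd block to an IOBuf,
// which frees it through the supplied callback when the last reference drops:
//
//   void* p = malloc(4096);
//   auto buf = folly::IOBuf::takeOwnership(
//       p, 4096, 0, [](void* b, void*) { free(b); }, nullptr,
//       true /* freeOnError */);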

IOBuf::IOBuf(WrapBufferOp, const void* buf, uint64_t capacity)
    : IOBuf(InternalConstructor(), 0,
            // We cast away the const-ness of the buffer here.
            // This is okay since IOBuf users must use unshare() to create a copy
            // of this buffer before writing to the buffer.
            static_cast<uint8_t*>(const_cast<void*>(buf)), capacity,
            static_cast<uint8_t*>(const_cast<void*>(buf)), capacity) {
}

IOBuf::IOBuf(WrapBufferOp op, ByteRange br)
    : IOBuf(op, br.data(), br.size()) {
}

unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, uint64_t capacity) {
  return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);
}

IOBuf IOBuf::wrapBufferAsValue(const void* buf, uint64_t capacity) {
  return IOBuf(WrapBufferOp::WRAP_BUFFER, buf, capacity);
}
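
// Usage sketch (illustrative): wrapping neither copies nor takes ownership,
// so the wrapped storage must outlive the IOBuf, and the buffer must be
// unshare()d (which copies) before any write:
//
//   static const char kGreeting[] = "hello";
//   auto buf = folly::IOBuf::wrapBuffer(kGreeting, sizeof(kGreeting) - 1);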

IOBuf::IOBuf() noexcept {
}

IOBuf::IOBuf(IOBuf&& other) noexcept
    : data_(other.data_),
      buf_(other.buf_),
      length_(other.length_),
      capacity_(other.capacity_),
      flagsAndSharedInfo_(other.flagsAndSharedInfo_) {
  // Reset other so it is a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.length_ = 0;
  other.capacity_ = 0;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of the chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move assignment on the head of a chain.)
  if (other.next_ != &other) {
    next_ = other.next_;
    next_->prev_ = this;
    other.next_ = &other;

    prev_ = other.prev_;
    prev_->next_ = this;
    other.prev_ = &other;
  }

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);
}

IOBuf::IOBuf(const IOBuf& other) {
  *this = other.cloneAsValue();
}

IOBuf::IOBuf(InternalConstructor,
             uintptr_t flagsAndSharedInfo,
             uint8_t* buf,
             uint64_t capacity,
             uint8_t* data,
             uint64_t length)
    : next_(this),
      prev_(this),
      data_(data),
      buf_(buf),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(flagsAndSharedInfo) {
  assert(data >= buf);
  assert(data + length <= buf + capacity);
}

IOBuf::~IOBuf() {
  // Destroying an IOBuf destroys the entire chain.
  // Users of IOBuf should only explicitly delete the head of any chain.
  // The other elements in the chain will be automatically destroyed.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  decrementRefcount();
}

IOBuf& IOBuf::operator=(IOBuf&& other) noexcept {
  if (this == &other) {
    return *this;
  }

  // If we are part of a chain, delete the rest of the chain.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  // Decrement our refcount on the current buffer
  decrementRefcount();

  // Take ownership of the other buffer's data
  data_ = other.data_;
  buf_ = other.buf_;
  length_ = other.length_;
  capacity_ = other.capacity_;
  flagsAndSharedInfo_ = other.flagsAndSharedInfo_;
  // Reset other so it is a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.length_ = 0;
  other.capacity_ = 0;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of the chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move assignment on the head of a chain.)
  if (other.next_ != &other) {
    next_ = other.next_;
    next_->prev_ = this;
    other.next_ = &other;

    prev_ = other.prev_;
    prev_->next_ = this;
    other.prev_ = &other;
  }

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);

  return *this;
}

IOBuf& IOBuf::operator=(const IOBuf& other) {
  if (this != &other) {
    *this = IOBuf(other);
  }
  return *this;
}

bool IOBuf::empty() const {
  const IOBuf* current = this;
  do {
    if (current->length() != 0) {
      return false;
    }
    current = current->next_;
  } while (current != this);
  return true;
}

size_t IOBuf::countChainElements() const {
  size_t numElements = 1;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    ++numElements;
  }
  return numElements;
}

uint64_t IOBuf::computeChainDataLength() const {
  uint64_t fullLength = length_;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    fullLength += current->length_;
  }
  return fullLength;
}

void IOBuf::prependChain(unique_ptr<IOBuf>&& iobuf) {
  // Take ownership of the specified IOBuf
  IOBuf* other = iobuf.release();

  // Remember the pointer to the tail of the other chain
  IOBuf* otherTail = other->prev_;

  // Hook up prev_->next_ to point at the start of the other chain,
  // and other->prev_ to point at prev_
  prev_->next_ = other;
  other->prev_ = prev_;

  // Hook up otherTail->next_ to point at us,
  // and prev_ to point back at otherTail,
  otherTail->next_ = this;
  prev_ = otherTail;
}
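
// Usage sketch (illustrative): called on the head of a chain, prependChain()
// splices the other chain in just before the head, i.e. at the tail of the
// ring:
//
//   auto head = folly::IOBuf::create(64);
//   head->prependChain(folly::IOBuf::create(64));
//   assert(head->countChainElements() == 2);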

unique_ptr<IOBuf> IOBuf::clone() const {
  return std::make_unique<IOBuf>(cloneAsValue());
}

unique_ptr<IOBuf> IOBuf::cloneOne() const {
  return std::make_unique<IOBuf>(cloneOneAsValue());
}

unique_ptr<IOBuf> IOBuf::cloneCoalesced() const {
  return std::make_unique<IOBuf>(cloneCoalescedAsValue());
}

IOBuf IOBuf::cloneAsValue() const {
  auto tmp = cloneOneAsValue();

  for (IOBuf* current = next_; current != this; current = current->next_) {
    tmp.prependChain(current->cloneOne());
  }

  return tmp;
}

IOBuf IOBuf::cloneOneAsValue() const {
  if (SharedInfo* info = sharedInfo()) {
    setFlags(kFlagMaybeShared);
    info->refcount.fetch_add(1, std::memory_order_acq_rel);
  }
  return IOBuf(
      InternalConstructor(),
      flagsAndSharedInfo_,
      buf_,
      capacity_,
      data_,
      length_);
}

IOBuf IOBuf::cloneCoalescedAsValue() const {
  if (!isChained()) {
    return cloneOneAsValue();
  }

  // Coalesce into newBuf
  const uint64_t newLength = computeChainDataLength();
  const uint64_t newHeadroom = headroom();
  const uint64_t newTailroom = prev()->tailroom();
  const uint64_t newCapacity = newLength + newHeadroom + newTailroom;
  IOBuf newBuf{CREATE, newCapacity};
  newBuf.advance(newHeadroom);

  auto current = this;
  do {
    if (current->length() > 0) {
      DCHECK_NOTNULL(current->data());
      DCHECK_LE(current->length(), newBuf.tailroom());
      memcpy(newBuf.writableTail(), current->data(), current->length());
      newBuf.append(current->length());
    }
    current = current->next();
  } while (current != this);

  DCHECK_EQ(newLength, newBuf.length());
  DCHECK_EQ(newHeadroom, newBuf.headroom());
  DCHECK_LE(newTailroom, newBuf.tailroom());

  return newBuf;
}
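
// Usage sketch (illustrative; `chain` is any unique_ptr<IOBuf> head):
// clone() shares the underlying buffers of every element, while
// cloneCoalesced() copies them into one contiguous, unshared buffer:
//
//   auto shared = chain->clone();         // refcount bump per element
//   auto flat = chain->cloneCoalesced();  // single element, single buffer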

void IOBuf::unshareOneSlow() {
  // Allocate a new buffer for the data
  uint8_t* buf;
  SharedInfo* sharedInfo;
  uint64_t actualCapacity;
  allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity);

  // Copy the data
  // Maintain the same amount of headroom. Since we maintained the same
  // minimum capacity we also maintain at least the same amount of tailroom.
  uint64_t headlen = headroom();
  if (length_ > 0) {
    assert(data_ != nullptr);
    memcpy(buf + headlen, data_, length_);
  }

  // Release our reference on the old buffer
  decrementRefcount();
  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared.
  setFlagsAndSharedInfo(0, sharedInfo);

  // Update the buffer pointers to point to the new buffer
  data_ = buf + headlen;
  buf_ = buf;
}

void IOBuf::unshareChained() {
  // unshareChained() should only be called if we are part of a chain of
  // multiple IOBufs. The caller should have already verified this.
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    if (current->isSharedOne()) {
      // we have to unshare
      break;
    }

    current = current->next_;
    if (current == this) {
      // None of the IOBufs in the chain are shared,
      // so return without doing anything
      return;
    }
  }

  // We have to unshare. Let coalesceSlow() do the work.
  coalesceSlow();
}

void IOBuf::markExternallyShared() {
  IOBuf* current = this;
  do {
    current->markExternallySharedOne();
    current = current->next_;
  } while (current != this);
}

void IOBuf::makeManagedChained() {
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    current->makeManagedOne();
    current = current->next_;
    if (current == this) {
      break;
    }
  }
}

void IOBuf::coalesceSlow() {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs. The caller should have already verified this.
  DCHECK(isChained());

  // Compute the length of the entire chain
  uint64_t newLength = 0;
  IOBuf* end = this;
  do {
    newLength += end->length_;
    end = end->next_;
  } while (end != this);

  coalesceAndReallocate(newLength, end);
  // We should be the only element left in the chain now
  DCHECK(!isChained());
}

void IOBuf::coalesceSlow(size_t maxLength) {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs. The caller should have already verified this.
  DCHECK(isChained());
  DCHECK_LT(length_, maxLength);

  // Compute the length of the entire chain
  uint64_t newLength = 0;
  IOBuf* end = this;
  while (true) {
    newLength += end->length_;
    end = end->next_;
    if (newLength >= maxLength) {
      break;
    }
    if (end == this) {
      throw std::overflow_error("attempted to coalesce more data than "
                                "available");
    }
  }

  coalesceAndReallocate(newLength, end);
  // We should have the requested length now
  DCHECK_GE(length_, maxLength);
}

void IOBuf::coalesceAndReallocate(size_t newHeadroom,
                                  size_t newLength,
                                  IOBuf* end,
                                  size_t newTailroom) {
  uint64_t newCapacity = newLength + newHeadroom + newTailroom;

  // Allocate space for the coalesced buffer.
  // We always convert to an external buffer, even if we happened to be an
  // internal buffer before.
  uint8_t* newBuf;
  SharedInfo* newInfo;
  uint64_t actualCapacity;
  allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity);

  // Copy the data into the new buffer
  uint8_t* newData = newBuf + newHeadroom;
  uint8_t* p = newData;
  IOBuf* current = this;
  size_t remaining = newLength;
  do {
    if (current->length_ > 0) {
      assert(current->length_ <= remaining);
      assert(current->data_ != nullptr);
      remaining -= current->length_;
      memcpy(p, current->data_, current->length_);
      p += current->length_;
    }
    current = current->next_;
  } while (current != end);
  assert(remaining == 0);

  // Point at the new buffer
  decrementRefcount();

  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared.
  setFlagsAndSharedInfo(0, newInfo);
  capacity_ = actualCapacity;
  buf_ = newBuf;
  data_ = newData;
  length_ = newLength;

  // Separate from the rest of our chain.
  // Since we don't store the unique_ptr returned by separateChain(),
  // this will immediately delete the returned subchain.
  if (isChained()) {
    (void)separateChain(next_, current->prev_);
  }
}

void IOBuf::decrementRefcount() {
  // Externally owned buffers don't have a SharedInfo object and aren't managed
  // by the reference count
  SharedInfo* info = sharedInfo();
  if (!info) {
    return;
  }

  // Decrement the refcount
  uint32_t newcnt = info->refcount.fetch_sub(
      1, std::memory_order_acq_rel);
  // Note that fetch_sub() returns the value before we decremented.
  // If it is 1, we were the only remaining user; if it is greater there are
  // still other users.
  if (newcnt > 1) {
    return;
  }

  // We were the last user. Free the buffer
  freeExtBuffer();

  // Free the SharedInfo if it was allocated separately.
  //
  // This is only used by takeOwnership().
  //
  // To avoid this special case handling in decrementRefcount(), we could have
  // takeOwnership() set a custom freeFn() that calls the user's free function
  // then frees the SharedInfo object. (This would require that
  // takeOwnership() store the user's free function with its allocated
  // SharedInfo object.) However, handling this specially with a flag seems
  // like it shouldn't be problematic.
  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  }
}

void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) {
  size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;
  DCHECK_LT(newCapacity, UINT32_MAX);

  // reserveSlow() is dangerous if anyone else is sharing the buffer, as we may
  // reallocate and free the original buffer. It should only ever be called if
  // we are the only user of the buffer.
  DCHECK(!isSharedOne());

  // We'll need to reallocate the buffer.
  // There are a few options.
  // - If we have enough total room, move the data around in the buffer
  //   and adjust the data_ pointer.
  // - If we're using an internal buffer, we'll switch to an external
  //   buffer with enough headroom and tailroom.
  // - If we have enough headroom (headroom() >= minHeadroom) but not too much
  //   (so we don't waste memory), we can try one of two things, depending on
  //   whether we use jemalloc or not:
  //   - If using jemalloc, we can try to expand in place, avoiding a memcpy()
  //   - If not using jemalloc and we don't have too much to copy,
  //     we'll use realloc() (note that realloc might have to copy
  //     headroom + data + tailroom, see smartRealloc in folly/Malloc.h)
  // - Otherwise, bite the bullet and reallocate.
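  //
  // Worked example (illustrative numbers): with capacity 64, headroom 40,
  // length 8 and tailroom 16, a reserve(8, 32) request is satisfied by the
  // memmove branch just below, since headroom + tailroom = 56 >= 8 + 32 = 40.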
  if (headroom() + tailroom() >= minHeadroom + minTailroom) {
    uint8_t* newData = writableBuffer() + minHeadroom;
    memmove(newData, data_, length_);
    data_ = newData;
    return;
  }

  size_t newAllocatedCapacity = 0;
  uint8_t* newBuffer = nullptr;
  uint64_t newHeadroom = 0;
  uint64_t oldHeadroom = headroom();

  // If we have a buffer allocated with malloc and we just need more tailroom,
  // try to use realloc()/xallocx() to grow the buffer in place.
  SharedInfo* info = sharedInfo();
  if (info && (info->freeFn == nullptr) && length_ != 0 &&
      oldHeadroom >= minHeadroom) {
    size_t headSlack = oldHeadroom - minHeadroom;
    newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack);
    if (usingJEMalloc()) {
      // We assume that tailroom is more useful and more important than
      // headroom (not least because realloc / xallocx allow us to grow the
      // buffer at the tail, but not at the head). So, if we have more headroom
      // than we need, we consider that "wasted". We arbitrarily define "too
      // much" headroom to be 25% of the capacity.
      if (headSlack * 4 <= newCapacity) {
        size_t allocatedCapacity = capacity() + sizeof(SharedInfo);
        void* p = buf_;
        if (allocatedCapacity >= jemallocMinInPlaceExpandable) {
          if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) {
            newBuffer = static_cast<uint8_t*>(p);
            newHeadroom = oldHeadroom;
          }
          // if xallocx failed, do nothing, fall back to malloc/memcpy/free
        }
      }
    } else { // Not using jemalloc
      size_t copySlack = capacity() - length_;
      if (copySlack * 2 <= length_) {
        void* p = realloc(buf_, newAllocatedCapacity);
        if (UNLIKELY(p == nullptr)) {
          throw std::bad_alloc();
        }
        newBuffer = static_cast<uint8_t*>(p);
        newHeadroom = oldHeadroom;
      }
    }
  }

  // None of the previous reallocation strategies worked (or we're using
  // an internal buffer). malloc/copy/free.
  if (newBuffer == nullptr) {
    newAllocatedCapacity = goodExtBufferSize(newCapacity);
    void* p = malloc(newAllocatedCapacity);
    if (UNLIKELY(p == nullptr)) {
      throw std::bad_alloc();
    }
    newBuffer = static_cast<uint8_t*>(p);
    if (length_ > 0) {
      assert(data_ != nullptr);
      memcpy(newBuffer + minHeadroom, data_, length_);
    }
    if (sharedInfo()) {
      freeExtBuffer();
    }
    newHeadroom = minHeadroom;
  }

  uint64_t cap;
  initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap);

  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  }

  setFlagsAndSharedInfo(0, info);
  capacity_ = cap;
  buf_ = newBuffer;
  data_ = newBuffer + newHeadroom;
  // length_ is unchanged
}

void IOBuf::freeExtBuffer() {
  SharedInfo* info = sharedInfo();
  DCHECK(info);

  if (info->freeFn) {
    try {
      info->freeFn(buf_, info->userData);
    } catch (...) {
      // The user's free function should never throw. Otherwise we might
      // throw from the IOBuf destructor. Other code paths like coalesce()
      // also assume that decrementRefcount() cannot throw.
      abort();
    }
  } else {
    free(buf_);
  }
}

void IOBuf::allocExtBuffer(uint64_t minCapacity,
                           uint8_t** bufReturn,
                           SharedInfo** infoReturn,
                           uint64_t* capacityReturn) {
  size_t mallocSize = goodExtBufferSize(minCapacity);
  uint8_t* buf = static_cast<uint8_t*>(malloc(mallocSize));
  if (UNLIKELY(buf == nullptr)) {
    throw std::bad_alloc();
  }
  initExtBuffer(buf, mallocSize, infoReturn, capacityReturn);
  *bufReturn = buf;
}

size_t IOBuf::goodExtBufferSize(uint64_t minCapacity) {
  // Determine how much space we should allocate. We'll store the SharedInfo
  // for the external buffer just after the buffer itself. (We store it just
  // after the buffer rather than just before so that the code can still just
  // use free(buf_) to free the buffer.)
  size_t minSize = static_cast<size_t>(minCapacity) + sizeof(SharedInfo);
  // Add room for padding so that the SharedInfo will be aligned on an 8-byte
  // boundary.
  minSize = (minSize + 7) & ~7;
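  // For example (assuming sizeof(SharedInfo) == 24): minCapacity = 100 gives
  // minSize = 124, which rounds up to (124 + 7) & ~7 = 128.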

  // Use goodMallocSize() to bump up the capacity to a decent size to request
  // from malloc, so we can use all of the space that malloc will probably give
  // us anyway.
  return goodMallocSize(minSize);
}

void IOBuf::initExtBuffer(uint8_t* buf, size_t mallocSize,
                          SharedInfo** infoReturn,
                          uint64_t* capacityReturn) {
  // Find the SharedInfo storage at the end of the buffer
  // and construct the SharedInfo.
  uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
  SharedInfo* sharedInfo = new (infoStart) SharedInfo;

  *capacityReturn = uint64_t(infoStart - buf);
  *infoReturn = sharedInfo;
}

fbstring IOBuf::moveToFbString() {
  // malloc-allocated buffers are just fine, everything else needs
  // to be turned into one.
  if (!sharedInfo() ||         // user owned, not ours to give up
      sharedInfo()->freeFn ||  // not malloc()-ed
      headroom() != 0 ||       // malloc()-ed block doesn't start at beginning
      tailroom() == 0 ||       // no room for NUL terminator
      isShared() ||            // shared
      isChained()) {           // chained
    // We might as well get rid of all head and tailroom if we're going
    // to reallocate; we need 1 byte for NUL terminator.
    coalesceAndReallocate(0, computeChainDataLength(), this, 1);
  }

  // Ensure NUL terminated
  *writableTail() = 0;
  fbstring str(reinterpret_cast<char*>(writableData()),
               length(), capacity(),
               AcquireMallocatedString());

  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  }

  // Reset to a state where we can be deleted cleanly
  flagsAndSharedInfo_ = 0;
  buf_ = nullptr;
  clear();
  return str;
}
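
// Usage sketch (illustrative): transfer a buffer's bytes into an fbstring;
// the IOBuf is left empty afterwards:
//
//   auto buf = folly::IOBuf::create(8);
//   memcpy(buf->writableData(), "abc", 3);
//   buf->append(3);
//   folly::fbstring s = buf->moveToFbString();  // s == "abc"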

IOBuf::Iterator IOBuf::cbegin() const {
  return Iterator(this, this);
}

IOBuf::Iterator IOBuf::cend() const {
  return Iterator(nullptr, nullptr);
}

folly::fbvector<struct iovec> IOBuf::getIov() const {
  folly::fbvector<struct iovec> iov;
  iov.reserve(countChainElements());
  appendToIov(&iov);
  return iov;
}

void IOBuf::appendToIov(folly::fbvector<struct iovec>* iov) const {
  IOBuf const* p = this;
  do {
    // some code can get confused by empty iovs, so skip them
    if (p->length() > 0) {
      iov->push_back({(void*)p->data(), folly::to<size_t>(p->length())});
    }
    p = p->next();
  } while (p != this);
}

size_t IOBuf::fillIov(struct iovec* iov, size_t len) const {
  IOBuf const* p = this;
  size_t i = 0;
  while (i < len) {
    // some code can get confused by empty iovs, so skip them
    if (p->length() > 0) {
      iov[i].iov_base = const_cast<uint8_t*>(p->data());
      iov[i].iov_len = p->length();
      i++;
    }
    p = p->next();
    if (p == this) {
      return i;
    }
  }
  return 0;
}
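
// Usage sketch (illustrative): gather the chain for a vectored write
// (writev() needs <sys/uio.h>); fillIov() returns 0 when the chain has more
// non-empty elements than the iov array can hold:
//
//   struct iovec iov[16];
//   size_t count = buf->fillIov(iov, 16);
//   if (count > 0) {
//     ssize_t n = writev(fd, iov, count);
//   }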

size_t IOBufHash::operator()(const IOBuf& buf) const {
  folly::hash::SpookyHashV2 hasher;
  hasher.Init(0, 0);
  io::Cursor cursor(&buf);
  while (true) {
    auto b = cursor.peekBytes();
    if (b.empty()) {
      break;
    }
    hasher.Update(b.data(), b.size());
    cursor.skip(b.size());
  }
  uint64_t h1;
  uint64_t h2;
  hasher.Final(&h1, &h2);
  return h1;
}
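
// Usage sketch (illustrative): the hash covers the full chain's bytes, so it
// pairs with IOBufEqual below for containers keyed on buffer contents:
//
//   std::unordered_set<folly::IOBuf, folly::IOBufHash, folly::IOBufEqual> set;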

bool IOBufEqual::operator()(const IOBuf& a, const IOBuf& b) const {
  io::Cursor ca(&a);
  io::Cursor cb(&b);
  for (;;) {
    auto ba = ca.peekBytes();
    auto bb = cb.peekBytes();
    if (ba.empty() && bb.empty()) {
      return true;
    } else if (ba.empty() || bb.empty()) {
      return false;
    }
    size_t n = std::min(ba.size(), bb.size());
    if (memcmp(ba.data(), bb.data(), n)) {
      return false;
    }
    ca.skip(n);
    cb.skip(n);
  }
}

} // namespace folly