 * Copyright 2017 Facebook, Inc.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *   http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.

#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS

#include <folly/io/IOBuf.h>

#include <folly/Conv.h>
#include <folly/Likely.h>
#include <folly/Malloc.h>
#include <folly/Memory.h>
#include <folly/ScopeGuard.h>
#include <folly/hash/SpookyHashV2.h>
#include <folly/io/Cursor.h>

using std::unique_ptr;

  // This memory segment contains an IOBuf that is still in use
  // This memory segment contains buffer data that is still in use

  // When create() is called for buffers less than kDefaultCombinedBufSize,
  // we allocate a single combined memory segment for the IOBuf and the data
  // together.  See the comments for createCombined()/createSeparate() for more
  // details.
  //
  // (The size of 1k is largely just a guess here.  We could probably do
  // benchmarks of real applications to see if adjusting this number makes a
  // difference.  Callers that know their exact use case can also explicitly
  // call createCombined() or createSeparate().)
  kDefaultCombinedBufSize = 1024

// Helper function for IOBuf::takeOwnership()
void takeOwnershipError(bool freeOnError, void* buf,
                        folly::IOBuf::FreeFunction freeFn,
    freeFn(buf, userData);
    // The user's free function is not allowed to throw.
    // (We are already in the middle of throwing an exception, so
    // we cannot let this exception go unhandled.)

struct IOBuf::HeapPrefix {
  explicit HeapPrefix(uint16_t flg) : magic(kHeapMagic), flags(flg) {}
  // Reset magic to 0 on destruction.  This is solely for debugging purposes
  // to help catch bugs where someone tries to use HeapStorage after it has
  // been freed.
  std::atomic<uint16_t> flags;

struct IOBuf::HeapStorage {
  // The IOBuf is last in the HeapStorage object.
  // This way operator new will work even if allocating a subclass of IOBuf
  // that requires more space.

struct IOBuf::HeapFullStorage {
  // Make sure jemalloc allocates from the 64-byte class.  Putting this here
  // because HeapStorage is private so it can't be at namespace level.
  static_assert(sizeof(HeapStorage) <= 64,
                "IOBuf may not grow over 56 bytes!");
  folly::max_align_t align;
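
  // Rough layout of a combined allocation (see createCombined() below):
  //   [hs.prefix: HeapPrefix][hs.buf: IOBuf][shared: SharedInfo][buffer data]
  // The buffer data begins at the `align` member and runs to the end of the
  // malloc()ed block.
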
IOBuf::SharedInfo::SharedInfo()
  // Use relaxed memory ordering here.  Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);

IOBuf::SharedInfo::SharedInfo(FreeFunction fn, void* arg)
  // Use relaxed memory ordering here.  Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);

void* IOBuf::operator new(size_t size) {
  size_t fullSize = offsetof(HeapStorage, buf) + size;
  auto* storage = static_cast<HeapStorage*>(malloc(fullSize));
  // operator new is not allowed to return nullptr
  if (UNLIKELY(storage == nullptr)) {
    throw std::bad_alloc();
  new (&storage->prefix) HeapPrefix(kIOBufInUse);
  return &(storage->buf);

void* IOBuf::operator new(size_t /* size */, void* ptr) { return ptr; }

void IOBuf::operator delete(void* ptr) {
  auto* storageAddr = static_cast<uint8_t*>(ptr) - offsetof(HeapStorage, buf);
  auto* storage = reinterpret_cast<HeapStorage*>(storageAddr);
  releaseStorage(storage, kIOBufInUse);

void IOBuf::releaseStorage(HeapStorage* storage, uint16_t freeFlags) {
  CHECK_EQ(storage->prefix.magic, static_cast<uint16_t>(kHeapMagic));

  // If we are unlucky and happen to load out-of-date flags here, the
  // compare_exchange_weak() call below will catch it and reload fresh data
  // with memory_order_acq_rel.
  auto flags = storage->prefix.flags.load(std::memory_order_acquire);
  DCHECK_EQ((flags & freeFlags), freeFlags);
    uint16_t newFlags = uint16_t(flags & ~freeFlags);
      // The storage space is now unused.  Free it.
      storage->prefix.HeapPrefix::~HeapPrefix();
    // This storage segment still contains portions that are in use.
    // Just clear the flags specified in freeFlags for now.
    auto ret = storage->prefix.flags.compare_exchange_weak(
        flags, newFlags, std::memory_order_acq_rel);
      // We successfully updated the flags.
    // We failed to update the flags.  Some other thread probably updated them
    // and cleared some of the other bits.  Continue around the loop to see if
    // we are the last user now, or if we need to try updating the flags again.

void IOBuf::freeInternalBuf(void* /* buf */, void* userData) {
  auto* storage = static_cast<HeapStorage*>(userData);
  releaseStorage(storage, kDataInUse);
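
// Note: a combined allocation (see createCombined() below) is tagged with both
// kIOBufInUse and kDataInUse. operator delete clears kIOBufInUse and
// freeInternalBuf() clears kDataInUse; releaseStorage() only free()s the block
// once both flags have been cleared, so either the IOBuf or the data can be
// released first without invalidating the other.
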
IOBuf::IOBuf(CreateOp, uint64_t capacity)
    flagsAndSharedInfo_(0) {
  allocExtBuffer(capacity, &buf_, &info, &capacity_);

IOBuf::IOBuf(CopyBufferOp /* op */,
             uint64_t minTailroom)
    : IOBuf(CREATE, headroom + size + minTailroom) {
    assert(buf != nullptr);
    memcpy(writableData(), buf, size);

IOBuf::IOBuf(CopyBufferOp op, ByteRange br,
             uint64_t headroom, uint64_t minTailroom)
    : IOBuf(op, br.data(), br.size(), headroom, minTailroom) {

unique_ptr<IOBuf> IOBuf::create(uint64_t capacity) {
  // For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer
  // all with a single allocation.
  //
  // We don't do this for larger buffers since it can be wasteful if the user
  // needs to reallocate the buffer but keeps using the same IOBuf object.
  // In this case we can't free the data space until the IOBuf is also
  // destroyed.  Callers can explicitly call createCombined() or
  // createSeparate() if they know their use case better, and know if they are
  // likely to reallocate the buffer later.
  if (capacity <= kDefaultCombinedBufSize) {
    return createCombined(capacity);
  return createSeparate(capacity);
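
  // For example (illustrative sizes, given the 1024-byte threshold above):
  //   auto small = IOBuf::create(256);   // combined: IOBuf + data in one malloc
  //   auto large = IOBuf::create(65536); // separate: data allocated on its own
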
unique_ptr<IOBuf> IOBuf::createCombined(uint64_t capacity) {
  // To save a memory allocation, allocate space for the IOBuf object, the
  // SharedInfo struct, and the data itself all with a single call to malloc().
  size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity;
  size_t mallocSize = goodMallocSize(requiredStorage);
  auto* storage = static_cast<HeapFullStorage*>(malloc(mallocSize));

  new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kDataInUse);
  new (&storage->shared) SharedInfo(freeInternalBuf, storage);

  uint8_t* bufAddr = reinterpret_cast<uint8_t*>(&storage->align);
  uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize;
  size_t actualCapacity = size_t(storageEnd - bufAddr);
  unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf(
      InternalConstructor(), packFlagsAndSharedInfo(0, &storage->shared),
      bufAddr, actualCapacity, bufAddr, 0));
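
  // Note that actualCapacity may exceed the requested capacity: goodMallocSize()
  // can round the allocation up, and any slack beyond the `align` member is
  // simply handed to the data buffer.
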
unique_ptr<IOBuf> IOBuf::createSeparate(uint64_t capacity) {
  return std::make_unique<IOBuf>(CREATE, capacity);

unique_ptr<IOBuf> IOBuf::createChain(
    size_t totalCapacity, uint64_t maxBufCapacity) {
  unique_ptr<IOBuf> out = create(
      std::min(totalCapacity, size_t(maxBufCapacity)));
  size_t allocatedCapacity = out->capacity();

  while (allocatedCapacity < totalCapacity) {
    unique_ptr<IOBuf> newBuf = create(
        std::min(totalCapacity - allocatedCapacity, size_t(maxBufCapacity)));
    allocatedCapacity += newBuf->capacity();
    out->prependChain(std::move(newBuf));
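
  // Illustrative use: build a chain whose buffers provide at least 10000 bytes
  // of total capacity, requesting at most 4096 bytes per buffer (the buffers
  // start out empty, and actual capacities may round up slightly):
  //   auto chain = IOBuf::createChain(10000, 4096);
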
IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length,
             FreeFunction freeFn, void* userData,
    data_(static_cast<uint8_t*>(buf)),
    buf_(static_cast<uint8_t*>(buf)),
    flagsAndSharedInfo_(packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
    setSharedInfo(new SharedInfo(freeFn, userData));
    takeOwnershipError(freeOnError, buf, freeFn, userData);

unique_ptr<IOBuf> IOBuf::takeOwnership(void* buf, uint64_t capacity,
  // TODO: We could allocate the IOBuf object and SharedInfo all in a single
  // memory allocation.  We could use the existing HeapStorage class, and
  // define a new kSharedInfoInUse flag.  We could change our code to call
  // releaseStorage(kFlagFreeSharedInfo) when this flag is set, rather than
  // directly calling delete.
  //
  // Note that we always pass freeOnError as false to the constructor.
  // If the constructor throws we'll handle it below.  (We have to handle
  // allocation failures from std::make_unique too.)
  return std::make_unique<IOBuf>(
      TAKE_OWNERSHIP, buf, capacity, length, freeFn, userData, false);
    takeOwnershipError(freeOnError, buf, freeFn, userData);
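
  // Illustrative use (hypothetical buffer): hand a malloc()ed block to an
  // IOBuf. With no freeFn supplied, the block is released with free() once the
  // last IOBuf referencing it goes away.
  //   void* data = malloc(4096);
  //   auto owned = IOBuf::takeOwnership(data, 4096);
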
IOBuf::IOBuf(WrapBufferOp, const void* buf, uint64_t capacity)
    : IOBuf(InternalConstructor(), 0,
            // We cast away the const-ness of the buffer here.
            // This is okay since IOBuf users must use unshare() to create a copy
            // of this buffer before writing to the buffer.
            static_cast<uint8_t*>(const_cast<void*>(buf)), capacity,
            static_cast<uint8_t*>(const_cast<void*>(buf)), capacity) {

IOBuf::IOBuf(WrapBufferOp op, ByteRange br)
    : IOBuf(op, br.data(), br.size()) {

unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, uint64_t capacity) {
  return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);

IOBuf IOBuf::wrapBufferAsValue(const void* buf, uint64_t capacity) {
  return IOBuf(WrapBufferOp::WRAP_BUFFER, buf, capacity);
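
  // Illustrative use: wrap an existing buffer without copying it or taking
  // ownership. The caller must keep `greeting` alive for as long as the IOBuf
  // (and any clones of it) exist, and must unshare() before writing.
  //   char greeting[] = "hello";
  //   auto wrapped = IOBuf::wrapBuffer(greeting, sizeof(greeting));
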
IOBuf::IOBuf() noexcept {

IOBuf::IOBuf(IOBuf&& other) noexcept
    : data_(other.data_),
      length_(other.length_),
      capacity_(other.capacity_),
      flagsAndSharedInfo_(other.flagsAndSharedInfo_) {
  // Reset other so it is in a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of a chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move assignment on the head of a chain.)
  if (other.next_ != &other) {
    other.next_ = &other;
    other.prev_ = &other;

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);

IOBuf::IOBuf(const IOBuf& other) {
  *this = other.cloneAsValue();

IOBuf::IOBuf(InternalConstructor,
             uintptr_t flagsAndSharedInfo,
      flagsAndSharedInfo_(flagsAndSharedInfo) {
  assert(data + length <= buf + capacity);

  // Destroying an IOBuf destroys the entire chain.
  // Users of IOBuf should only explicitly delete the head of any chain.
  // The other elements in the chain will be automatically destroyed.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();

IOBuf& IOBuf::operator=(IOBuf&& other) noexcept {
  if (this == &other) {

  // If we are part of a chain, delete the rest of the chain.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();

  // Decrement our refcount on the current buffer

  // Take ownership of the other buffer's data
  length_ = other.length_;
  capacity_ = other.capacity_;
  flagsAndSharedInfo_ = other.flagsAndSharedInfo_;

  // Reset other so it is in a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of a chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move assignment on the head of a chain.)
  if (other.next_ != &other) {
    other.next_ = &other;
    other.prev_ = &other;

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);

IOBuf& IOBuf::operator=(const IOBuf& other) {
  if (this != &other) {
    *this = IOBuf(other);

bool IOBuf::empty() const {
  const IOBuf* current = this;
    if (current->length() != 0) {
    current = current->next_;
  } while (current != this);

size_t IOBuf::countChainElements() const {
  size_t numElements = 1;
  for (IOBuf* current = next_; current != this; current = current->next_) {

uint64_t IOBuf::computeChainDataLength() const {
  uint64_t fullLength = length_;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    fullLength += current->length_;

void IOBuf::prependChain(unique_ptr<IOBuf>&& iobuf) {
  // Take ownership of the specified IOBuf
  IOBuf* other = iobuf.release();

  // Remember the pointer to the tail of the other chain
  IOBuf* otherTail = other->prev_;

  // Hook up prev_->next_ to point at the start of the other chain,
  // and other->prev_ to point at prev_
  prev_->next_ = other;
  other->prev_ = prev_;

  // Hook up otherTail->next_ to point at us,
  // and prev_ to point back at otherTail,
  otherTail->next_ = this;
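
  // For example, if this chain is (A, B, C) with this == A, and iobuf heads
  // the chain (D, E), the spliced result is the single circular chain
  // (A, B, C, D, E): D..E are inserted between C (the old tail) and A.
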
unique_ptr<IOBuf> IOBuf::clone() const {
  return std::make_unique<IOBuf>(cloneAsValue());

unique_ptr<IOBuf> IOBuf::cloneOne() const {
  return std::make_unique<IOBuf>(cloneOneAsValue());

unique_ptr<IOBuf> IOBuf::cloneCoalesced() const {
  return std::make_unique<IOBuf>(cloneCoalescedAsValue());

IOBuf IOBuf::cloneAsValue() const {
  auto tmp = cloneOneAsValue();
  for (IOBuf* current = next_; current != this; current = current->next_) {
    tmp.prependChain(current->cloneOne());

IOBuf IOBuf::cloneOneAsValue() const {
  if (SharedInfo* info = sharedInfo()) {
    setFlags(kFlagMaybeShared);
    info->refcount.fetch_add(1, std::memory_order_acq_rel);
      InternalConstructor(),

IOBuf IOBuf::cloneCoalescedAsValue() const {
    return cloneOneAsValue();
  // Coalesce into newBuf
  const uint64_t newLength = computeChainDataLength();
  const uint64_t newHeadroom = headroom();
  const uint64_t newTailroom = prev()->tailroom();
  const uint64_t newCapacity = newLength + newHeadroom + newTailroom;
  IOBuf newBuf{CREATE, newCapacity};
  newBuf.advance(newHeadroom);

    if (current->length() > 0) {
      DCHECK_NOTNULL(current->data());
      DCHECK_LE(current->length(), newBuf.tailroom());
      memcpy(newBuf.writableTail(), current->data(), current->length());
      newBuf.append(current->length());
    current = current->next();
  } while (current != this);

  DCHECK_EQ(newLength, newBuf.length());
  DCHECK_EQ(newHeadroom, newBuf.headroom());
  DCHECK_LE(newTailroom, newBuf.tailroom());

void IOBuf::unshareOneSlow() {
  // Allocate a new buffer for the data
  SharedInfo* sharedInfo;
  uint64_t actualCapacity;
  allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity);

  // Maintain the same amount of headroom.  Since we maintained the same
  // minimum capacity we also maintain at least the same amount of tailroom.
  uint64_t headlen = headroom();
    assert(data_ != nullptr);
    memcpy(buf + headlen, data_, length_);

  // Release our reference on the old buffer

  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared.
  setFlagsAndSharedInfo(0, sharedInfo);

  // Update the buffer pointers to point to the new buffer
  data_ = buf + headlen;

void IOBuf::unshareChained() {
  // unshareChained() should only be called if we are part of a chain of
  // multiple IOBufs.  The caller should have already verified this.

  IOBuf* current = this;
    if (current->isSharedOne()) {
      // we have to unshare
    current = current->next_;
    if (current == this) {
      // None of the IOBufs in the chain are shared,
      // so return without doing anything

  // We have to unshare.  Let coalesceSlow() do the work.

void IOBuf::markExternallyShared() {
  IOBuf* current = this;
    current->markExternallySharedOne();
    current = current->next_;
  } while (current != this);

void IOBuf::makeManagedChained() {
  IOBuf* current = this;
    current->makeManagedOne();
    current = current->next_;
    if (current == this) {

void IOBuf::coalesceSlow() {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs.  The caller should have already verified this.

  // Compute the length of the entire chain
  uint64_t newLength = 0;
    newLength += end->length_;
  } while (end != this);

  coalesceAndReallocate(newLength, end);
  // We should be the only element left in the chain now
  DCHECK(!isChained());

void IOBuf::coalesceSlow(size_t maxLength) {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs.  The caller should have already verified this.
  DCHECK_LT(length_, maxLength);

  // Compute the length of the entire chain
  uint64_t newLength = 0;
    newLength += end->length_;
    if (newLength >= maxLength) {
      throw std::overflow_error("attempted to coalesce more data than "

  coalesceAndReallocate(newLength, end);
  // We should have the requested length now
  DCHECK_GE(length_, maxLength);

void IOBuf::coalesceAndReallocate(size_t newHeadroom,
                                  size_t newTailroom) {
  uint64_t newCapacity = newLength + newHeadroom + newTailroom;

  // Allocate space for the coalesced buffer.
  // We always convert to an external buffer, even if we happened to be an
  // internal buffer before.
  uint64_t actualCapacity;
  allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity);

  // Copy the data into the new buffer
  uint8_t* newData = newBuf + newHeadroom;
  uint8_t* p = newData;
  IOBuf* current = this;
  size_t remaining = newLength;
    if (current->length_ > 0) {
      assert(current->length_ <= remaining);
      assert(current->data_ != nullptr);
      remaining -= current->length_;
      memcpy(p, current->data_, current->length_);
      p += current->length_;
    current = current->next_;
  } while (current != end);
  assert(remaining == 0);

  // Point at the new buffer
  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared.
  setFlagsAndSharedInfo(0, newInfo);
  capacity_ = actualCapacity;

    // Separate from the rest of our chain.
    // Since we don't store the unique_ptr returned by separateChain(),
    // this will immediately delete the returned subchain.
    (void)separateChain(next_, current->prev_);

void IOBuf::decrementRefcount() {
  // Externally owned buffers don't have a SharedInfo object and aren't managed
  // by the reference count
  SharedInfo* info = sharedInfo();

  // Decrement the refcount
  uint32_t newcnt = info->refcount.fetch_sub(
      1, std::memory_order_acq_rel);
  // Note that fetch_sub() returns the value before we decremented.
  // If it is 1, we were the only remaining user; if it is greater there are
  // still other users.

  // We were the last user.  Free the buffer

  // Free the SharedInfo if it was allocated separately.
  // This is only used by takeOwnership().
  // To avoid this special case handling in decrementRefcount(), we could have
  // takeOwnership() set a custom freeFn() that calls the user's free function
  // then frees the SharedInfo object.  (This would require that
  // takeOwnership() store the user's free function with its allocated
  // SharedInfo object.)  However, handling this specially with a flag seems
  // like it shouldn't be problematic.
  if (flags() & kFlagFreeSharedInfo) {

void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) {
  size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;
  DCHECK_LT(newCapacity, UINT32_MAX);

  // reserveSlow() is dangerous if anyone else is sharing the buffer, as we may
  // reallocate and free the original buffer.  It should only ever be called if
  // we are the only user of the buffer.
  DCHECK(!isSharedOne());

  // We'll need to reallocate the buffer.
  // There are a few options.
  // - If we have enough total room, move the data around in the buffer
  //   and adjust the data_ pointer.
  // - If we're using an internal buffer, we'll switch to an external
  //   buffer with enough headroom and tailroom.
  // - If we have enough headroom (headroom() >= minHeadroom) but not too much
  //   (so we don't waste memory), we can try one of two things, depending on
  //   whether we use jemalloc or not:
  //   - If using jemalloc, we can try to expand in place, avoiding a memcpy()
  //   - If not using jemalloc and we don't have too much to copy,
  //     we'll use realloc() (note that realloc might have to copy
  //     headroom + data + tailroom, see smartRealloc in folly/Malloc.h)
  // - Otherwise, bite the bullet and reallocate.
  if (headroom() + tailroom() >= minHeadroom + minTailroom) {
    uint8_t* newData = writableBuffer() + minHeadroom;
    memmove(newData, data_, length_);
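
    // For example (illustrative numbers): with capacity 100, headroom 40 and
    // length 20, a request for minHeadroom 0 and minTailroom 70 is satisfied
    // here by sliding the 20 bytes of data down to offset 0 instead of
    // reallocating.
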
  size_t newAllocatedCapacity = 0;
  uint8_t* newBuffer = nullptr;
  uint64_t newHeadroom = 0;
  uint64_t oldHeadroom = headroom();

  // If we have a buffer allocated with malloc and we just need more tailroom,
  // try to use realloc()/xallocx() to grow the buffer in place.
  SharedInfo* info = sharedInfo();
  if (info && (info->freeFn == nullptr) && length_ != 0 &&
      oldHeadroom >= minHeadroom) {
    size_t headSlack = oldHeadroom - minHeadroom;
    newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack);
    if (usingJEMalloc()) {
      // We assume that tailroom is more useful and more important than
      // headroom (not least because realloc / xallocx allow us to grow the
      // buffer at the tail, but not at the head).  So, if we have more
      // headroom than we need, we consider that "wasted".  We arbitrarily
      // define "too much" headroom to be 25% of the capacity.
      if (headSlack * 4 <= newCapacity) {
        size_t allocatedCapacity = capacity() + sizeof(SharedInfo);
        if (allocatedCapacity >= jemallocMinInPlaceExpandable) {
          if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) {
            newBuffer = static_cast<uint8_t*>(p);
            newHeadroom = oldHeadroom;
          // if xallocx failed, do nothing, fall back to malloc/memcpy/free
    } else {  // Not using jemalloc
      size_t copySlack = capacity() - length_;
      if (copySlack * 2 <= length_) {
        void* p = realloc(buf_, newAllocatedCapacity);
        if (UNLIKELY(p == nullptr)) {
          throw std::bad_alloc();
        newBuffer = static_cast<uint8_t*>(p);
        newHeadroom = oldHeadroom;

  // None of the previous reallocation strategies worked (or we're using
  // an internal buffer).  malloc/copy/free.
  if (newBuffer == nullptr) {
    newAllocatedCapacity = goodExtBufferSize(newCapacity);
    void* p = malloc(newAllocatedCapacity);
    if (UNLIKELY(p == nullptr)) {
      throw std::bad_alloc();
    newBuffer = static_cast<uint8_t*>(p);
      assert(data_ != nullptr);
      memcpy(newBuffer + minHeadroom, data_, length_);
    newHeadroom = minHeadroom;

  initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap);
  if (flags() & kFlagFreeSharedInfo) {
  setFlagsAndSharedInfo(0, info);

  data_ = newBuffer + newHeadroom;
  // length_ is unchanged

void IOBuf::freeExtBuffer() {
  SharedInfo* info = sharedInfo();
    info->freeFn(buf_, info->userData);
    // The user's free function should never throw.  Otherwise we might
    // throw from the IOBuf destructor.  Other code paths like coalesce()
    // also assume that decrementRefcount() cannot throw.

void IOBuf::allocExtBuffer(uint64_t minCapacity,
                           SharedInfo** infoReturn,
                           uint64_t* capacityReturn) {
  size_t mallocSize = goodExtBufferSize(minCapacity);
  uint8_t* buf = static_cast<uint8_t*>(malloc(mallocSize));
  if (UNLIKELY(buf == nullptr)) {
    throw std::bad_alloc();
  initExtBuffer(buf, mallocSize, infoReturn, capacityReturn);

size_t IOBuf::goodExtBufferSize(uint64_t minCapacity) {
  // Determine how much space we should allocate.  We'll store the SharedInfo
  // for the external buffer just after the buffer itself.  (We store it just
  // after the buffer rather than just before so that the code can still just
  // use free(buf_) to free the buffer.)
  size_t minSize = static_cast<size_t>(minCapacity) + sizeof(SharedInfo);
  // Add room for padding so that the SharedInfo will be aligned on an 8-byte
  // boundary.
  minSize = (minSize + 7) & ~7;

  // Use goodMallocSize() to bump up the capacity to a decent size to request
  // from malloc, so we can use all of the space that malloc will probably give
  // us anyway.
  return goodMallocSize(minSize);
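
  // Worked example (illustrative; assume sizeof(SharedInfo) is 24 bytes):
  // goodExtBufferSize(1000) computes 1000 + 24 = 1024, which is already
  // 8-byte aligned, and returns whatever goodMallocSize(1024) rounds that up
  // to. initExtBuffer() then places the SharedInfo in the last
  // sizeof(SharedInfo) bytes of the allocated block.
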
void IOBuf::initExtBuffer(uint8_t* buf, size_t mallocSize,
                          SharedInfo** infoReturn,
                          uint64_t* capacityReturn) {
  // Find the SharedInfo storage at the end of the buffer
  // and construct the SharedInfo.
  uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
  SharedInfo* sharedInfo = new (infoStart) SharedInfo;

  *capacityReturn = uint64_t(infoStart - buf);
  *infoReturn = sharedInfo;

fbstring IOBuf::moveToFbString() {
  // malloc-allocated buffers are just fine, everything else needs
  // to be turned into one.
  if (!sharedInfo() ||         // user owned, not ours to give up
      sharedInfo()->freeFn ||  // not malloc()-ed
      headroom() != 0 ||       // malloc()-ed block doesn't start at beginning
      tailroom() == 0 ||       // no room for NUL terminator
      isShared() ||            // shared
      isChained()) {           // chained
    // We might as well get rid of all head and tailroom if we're going
    // to reallocate; we need 1 byte for NUL terminator.
    coalesceAndReallocate(0, computeChainDataLength(), this, 1);

  // Ensure NUL terminated
  fbstring str(reinterpret_cast<char*>(writableData()),
               length(), capacity(),
               AcquireMallocatedString());

  if (flags() & kFlagFreeSharedInfo) {

  // Reset to a state where we can be deleted cleanly
  flagsAndSharedInfo_ = 0;

IOBuf::Iterator IOBuf::cbegin() const {
  return Iterator(this, this);

IOBuf::Iterator IOBuf::cend() const {
  return Iterator(nullptr, nullptr);

folly::fbvector<struct iovec> IOBuf::getIov() const {
  folly::fbvector<struct iovec> iov;
  iov.reserve(countChainElements());

void IOBuf::appendToIov(folly::fbvector<struct iovec>* iov) const {
  IOBuf const* p = this;
    // some code can get confused by empty iovs, so skip them
    if (p->length() > 0) {
      iov->push_back({(void*)p->data(), folly::to<size_t>(p->length())});

size_t IOBuf::fillIov(struct iovec* iov, size_t len) const {
  IOBuf const* p = this;
    // some code can get confused by empty iovs, so skip them
    if (p->length() > 0) {
      iov[i].iov_base = const_cast<uint8_t*>(p->data());
      iov[i].iov_len = p->length();

size_t IOBufHash::operator()(const IOBuf& buf) const {
  folly::hash::SpookyHashV2 hasher;
  io::Cursor cursor(&buf);
    auto b = cursor.peekBytes();
    hasher.Update(b.data(), b.size());
    cursor.skip(b.size());
  hasher.Final(&h1, &h2);
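
  // Because the cursor streams every byte of the chain through the hasher,
  // two chains with the same overall contents hash identically, regardless of
  // how those bytes are split across chain elements.
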
bool IOBufEqual::operator()(const IOBuf& a, const IOBuf& b) const {
    auto ba = ca.peekBytes();
    auto bb = cb.peekBytes();
    if (ba.empty() && bb.empty()) {
    } else if (ba.empty() || bb.empty()) {
    size_t n = std::min(ba.size(), bb.size());
    if (memcmp(ba.data(), bb.data(), n)) {

} // namespace folly