/*
 * Copyright 2014 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define __STDC_LIMIT_MACROS

#include "folly/io/IOBuf.h"

#include "folly/Conv.h"
#include "folly/Likely.h"
#include "folly/Malloc.h"
#include "folly/Memory.h"
#include "folly/ScopeGuard.h"

using std::unique_ptr;
// This memory segment contains an IOBuf that is still in use
// This memory segment contains buffer data that is still in use

// When create() is called for buffers less than kDefaultCombinedBufSize,
// we allocate a single combined memory segment for the IOBuf and the data
// together.  See the comments for createCombined()/createSeparate() for more
// details.
//
// (The size of 1k is largely just a guess here.  We could probably do
// benchmarks of real applications to see if adjusting this number makes a
// difference.  Callers that know their exact use case can also explicitly
// call createCombined() or createSeparate().)
kDefaultCombinedBufSize = 1024
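
// As a rough illustration (an editorial example, not part of the original
// source): a caller that already knows its allocation pattern can bypass the
// size heuristic in create() and pick the layout explicitly, e.g.
//
//   auto small = IOBuf::createCombined(256);     // IOBuf + SharedInfo + data in one malloc
//   auto large = IOBuf::createSeparate(1 << 20); // data block can be freed independently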
// Helper function for IOBuf::takeOwnership()
void takeOwnershipError(bool freeOnError, void* buf,
                        folly::IOBuf::FreeFunction freeFn,
    freeFn(buf, userData);
    // The user's free function is not allowed to throw.
    // (We are already in the middle of throwing an exception, so
    // we cannot let this exception go unhandled.)

} // unnamed namespace
struct IOBuf::HeapPrefix {
  HeapPrefix(uint16_t flg)

  // Reset magic to 0 on destruction.  This is solely for debugging purposes
  // to help catch bugs where someone tries to use HeapStorage after it has
  // been freed.

  std::atomic<uint16_t> flags;

struct IOBuf::HeapStorage {
  // The IOBuf is last in the HeapStorage object.
  // This way operator new will work even if allocating a subclass of IOBuf
  // that requires more space.

struct IOBuf::HeapFullStorage {
  // Make sure jemalloc allocates from the 64-byte class.  Putting this here
  // because HeapStorage is private so it can't be at namespace level.
  static_assert(sizeof(HeapStorage) <= 64,
                "IOBuf may not grow over 56 bytes!");
IOBuf::SharedInfo::SharedInfo()
  // Use relaxed memory ordering here.  Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);

IOBuf::SharedInfo::SharedInfo(FreeFunction fn, void* arg)
  // Use relaxed memory ordering here.  Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);

void* IOBuf::operator new(size_t size) {
  size_t fullSize = offsetof(HeapStorage, buf) + size;
  auto* storage = static_cast<HeapStorage*>(malloc(fullSize));
  // operator new is not allowed to return NULL
  if (UNLIKELY(storage == nullptr)) {
    throw std::bad_alloc();
  new (&storage->prefix) HeapPrefix(kIOBufInUse);
  return &(storage->buf);

void* IOBuf::operator new(size_t size, void* ptr) {

void IOBuf::operator delete(void* ptr) {
  auto* storageAddr = static_cast<uint8_t*>(ptr) - offsetof(HeapStorage, buf);
  auto* storage = reinterpret_cast<HeapStorage*>(storageAddr);
  releaseStorage(storage, kIOBufInUse);
void IOBuf::releaseStorage(HeapStorage* storage, uint16_t freeFlags) {
  CHECK_EQ(storage->prefix.magic, static_cast<uint16_t>(kHeapMagic));

  // A stale value loaded here is not a problem.  If we are unlucky and happen
  // to get out-of-date data, the compare_exchange_weak() call below will
  // catch it and load fresh data with memory_order_acq_rel.
  auto flags = storage->prefix.flags.load(std::memory_order_acquire);
  DCHECK_EQ((flags & freeFlags), freeFlags);

    uint16_t newFlags = (flags & ~freeFlags);
      // The storage space is now unused.  Free it.
      storage->prefix.HeapPrefix::~HeapPrefix();

    // This storage segment still contains portions that are in use.
    // Just clear the flags specified in freeFlags for now.
    auto ret = storage->prefix.flags.compare_exchange_weak(
        flags, newFlags, std::memory_order_acq_rel);
      // We successfully updated the flags.

    // We failed to update the flags.  Some other thread probably updated them
    // and cleared some of the other bits.  Continue around the loop to see if
    // we are the last user now, or if we need to try updating the flags again.
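
  // To sketch the lifecycle of a combined segment (an editorial note, not
  // from the original source): createCombined() constructs the HeapPrefix
  // with (kIOBufInUse | kDataInUse); IOBuf::operator delete later calls
  // releaseStorage(storage, kIOBufInUse) and freeInternalBuf() calls
  // releaseStorage(storage, kDataInUse).  Whichever of the two runs second
  // sees newFlags == 0 and actually frees the segment.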
void IOBuf::freeInternalBuf(void* buf, void* userData) {
  auto* storage = static_cast<HeapStorage*>(userData);
  releaseStorage(storage, kDataInUse);

IOBuf::IOBuf(CreateOp, uint64_t capacity)
    flagsAndSharedInfo_(0) {
  allocExtBuffer(capacity, &buf_, &info, &capacity_);

IOBuf::IOBuf(CopyBufferOp op, const void* buf, uint64_t size,
             uint64_t headroom, uint64_t minTailroom)
  : IOBuf(CREATE, headroom + size + minTailroom) {
  memcpy(writableData(), buf, size);

IOBuf::IOBuf(CopyBufferOp op, ByteRange br,
             uint64_t headroom, uint64_t minTailroom)
  : IOBuf(op, br.data(), br.size(), headroom, minTailroom) {

unique_ptr<IOBuf> IOBuf::create(uint64_t capacity) {
  // For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer
  // all with a single allocation.
  //
  // We don't do this for larger buffers since it can be wasteful if the user
  // needs to reallocate the buffer but keeps using the same IOBuf object.
  // In this case we can't free the data space until the IOBuf is also
  // destroyed.  Callers can explicitly call createCombined() or
  // createSeparate() if they know their use case better, and know if they are
  // likely to reallocate the buffer later.
  if (capacity <= kDefaultCombinedBufSize) {
    return createCombined(capacity);
  return createSeparate(capacity);
unique_ptr<IOBuf> IOBuf::createCombined(uint64_t capacity) {
  // To save a memory allocation, allocate space for the IOBuf object, the
  // SharedInfo struct, and the data itself all with a single call to malloc().
  size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity;
  size_t mallocSize = goodMallocSize(requiredStorage);
  auto* storage = static_cast<HeapFullStorage*>(malloc(mallocSize));

  new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kDataInUse);
  new (&storage->shared) SharedInfo(freeInternalBuf, storage);

  uint8_t* bufAddr = reinterpret_cast<uint8_t*>(&storage->align);
  uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize;
  size_t actualCapacity = storageEnd - bufAddr;
  unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf(
      InternalConstructor(), packFlagsAndSharedInfo(0, &storage->shared),
      bufAddr, actualCapacity, bufAddr, 0));

unique_ptr<IOBuf> IOBuf::createSeparate(uint64_t capacity) {
  return make_unique<IOBuf>(CREATE, capacity);

unique_ptr<IOBuf> IOBuf::createChain(
    size_t totalCapacity, uint64_t maxBufCapacity) {
  unique_ptr<IOBuf> out = create(
      std::min(totalCapacity, size_t(maxBufCapacity)));
  size_t allocatedCapacity = out->capacity();

  while (allocatedCapacity < totalCapacity) {
    unique_ptr<IOBuf> newBuf = create(
        std::min(totalCapacity - allocatedCapacity, size_t(maxBufCapacity)));
    allocatedCapacity += newBuf->capacity();
    out->prependChain(std::move(newBuf));
IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length,
             FreeFunction freeFn, void* userData,
    data_(static_cast<uint8_t*>(buf)),
    buf_(static_cast<uint8_t*>(buf)),
    flagsAndSharedInfo_(packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
    setSharedInfo(new SharedInfo(freeFn, userData));
    takeOwnershipError(freeOnError, buf, freeFn, userData);

unique_ptr<IOBuf> IOBuf::takeOwnership(void* buf, uint64_t capacity,
  // TODO: We could allocate the IOBuf object and SharedInfo all in a single
  // memory allocation.  We could use the existing HeapStorage class, and
  // define a new kSharedInfoInUse flag.  We could then change our code to
  // call releaseStorage(kSharedInfoInUse) when kFlagFreeSharedInfo is set,
  // rather than directly calling delete.
  //
  // Note that we always pass freeOnError as false to the constructor.
  // If the constructor throws we'll handle it below.  (We have to handle
  // allocation failures from make_unique too.)
  return make_unique<IOBuf>(TAKE_OWNERSHIP, buf, capacity, length,
                            freeFn, userData, false);
    takeOwnershipError(freeOnError, buf, freeFn, userData);

IOBuf::IOBuf(WrapBufferOp, const void* buf, uint64_t capacity)
  : IOBuf(InternalConstructor(), 0,
          // We cast away the const-ness of the buffer here.
          // This is okay since IOBuf users must use unshare() to create a copy
          // of this buffer before writing to the buffer.
          static_cast<uint8_t*>(const_cast<void*>(buf)), capacity,
          static_cast<uint8_t*>(const_cast<void*>(buf)), capacity) {

IOBuf::IOBuf(WrapBufferOp op, ByteRange br)
  : IOBuf(op, br.data(), br.size()) {

unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, uint64_t capacity) {
  return make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);

IOBuf::IOBuf() noexcept {

IOBuf::IOBuf(IOBuf&& other) noexcept {
  *this = std::move(other);

IOBuf::IOBuf(InternalConstructor,
             uintptr_t flagsAndSharedInfo,
    flagsAndSharedInfo_(flagsAndSharedInfo) {
  assert(data + length <= buf + capacity);

  // Destroying an IOBuf destroys the entire chain.
  // Users of IOBuf should only explicitly delete the head of any chain.
  // The other elements in the chain will be automatically destroyed.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
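
  // In caller terms (an editorial example, not from the original source),
  // destroying the head of a chain is enough to reclaim every element:
  //
  //   auto head = IOBuf::create(1024);
  //   head->prependChain(IOBuf::create(2048));
  //   head.reset();  // unlinks and destroys both buffers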
IOBuf& IOBuf::operator=(IOBuf&& other) noexcept {
  if (this == &other) {

  // If we are part of a chain, delete the rest of the chain.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();

  // Decrement our refcount on the current buffer

  // Take ownership of the other buffer's data
  length_ = other.length_;
  capacity_ = other.capacity_;
  flagsAndSharedInfo_ = other.flagsAndSharedInfo_;
  // Reset other so it is in a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of a chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move assignment on the head of a chain.)
  if (other.next_ != &other) {
    other.next_ = &other;
    other.prev_ = &other;

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);

bool IOBuf::empty() const {
  const IOBuf* current = this;
    if (current->length() != 0) {
    current = current->next_;
  } while (current != this);
size_t IOBuf::countChainElements() const {
  size_t numElements = 1;
  for (IOBuf* current = next_; current != this; current = current->next_) {

uint64_t IOBuf::computeChainDataLength() const {
  uint64_t fullLength = length_;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    fullLength += current->length_;

void IOBuf::prependChain(unique_ptr<IOBuf>&& iobuf) {
  // Take ownership of the specified IOBuf
  IOBuf* other = iobuf.release();

  // Remember the pointer to the tail of the other chain
  IOBuf* otherTail = other->prev_;

  // Hook up prev_->next_ to point at the start of the other chain,
  // and other->prev_ to point at prev_
  prev_->next_ = other;
  other->prev_ = prev_;

  // Hook up otherTail->next_ to point at us,
  // and prev_ to point back at otherTail,
  otherTail->next_ = this;
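
  // As an illustration (an editorial note, not from the original source):
  // with chain A = a <-> b and chain C = c <-> d (both circularly linked),
  // calling a->prependChain(std::move(c)) splices C in just before a, so
  // iterating from a yields a, b, c, d:
  //
  //   before:  a <-> b <-> (back to a)      c <-> d <-> (back to c)
  //   after:   a <-> b <-> c <-> d <-> (back to a)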
unique_ptr<IOBuf> IOBuf::clone() const {
  unique_ptr<IOBuf> ret = make_unique<IOBuf>();

unique_ptr<IOBuf> IOBuf::cloneOne() const {
  unique_ptr<IOBuf> ret = make_unique<IOBuf>();

void IOBuf::cloneInto(IOBuf& other) const {
  for (IOBuf* current = next_; current != this; current = current->next_) {
    tmp.prependChain(current->cloneOne());

  other = std::move(tmp);

void IOBuf::cloneOneInto(IOBuf& other) const {
  SharedInfo* info = sharedInfo();
    setFlags(kFlagMaybeShared);

  other = IOBuf(InternalConstructor(),
                flagsAndSharedInfo_, buf_, capacity_,
    info->refcount.fetch_add(1, std::memory_order_acq_rel);

void IOBuf::unshareOneSlow() {
  // Allocate a new buffer for the data
  SharedInfo* sharedInfo;
  uint64_t actualCapacity;
  allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity);

  // Maintain the same amount of headroom.  Since we maintained the same
  // minimum capacity we also maintain at least the same amount of tailroom.
  uint64_t headlen = headroom();
  memcpy(buf + headlen, data_, length_);

  // Release our reference on the old buffer

  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared.
  setFlagsAndSharedInfo(0, sharedInfo);

  // Update the buffer pointers to point to the new buffer
  data_ = buf + headlen;

void IOBuf::unshareChained() {
  // unshareChained() should only be called if we are part of a chain of
  // multiple IOBufs.  The caller should have already verified this.

  IOBuf* current = this;
    if (current->isSharedOne()) {
      // we have to unshare
    current = current->next_;
    if (current == this) {
      // None of the IOBufs in the chain are shared,
      // so return without doing anything

  // We have to unshare.  Let coalesceSlow() do the work.

void IOBuf::coalesceSlow() {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs.  The caller should have already verified this.

  // Compute the length of the entire chain
  uint64_t newLength = 0;
    newLength += end->length_;
  } while (end != this);

  coalesceAndReallocate(newLength, end);
  // We should be the only element left in the chain now
  DCHECK(!isChained());
void IOBuf::coalesceSlow(size_t maxLength) {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs.  The caller should have already verified this.
  DCHECK_LT(length_, maxLength);

  // Compute the length of the entire chain
  uint64_t newLength = 0;
    newLength += end->length_;
    if (newLength >= maxLength) {
      throw std::overflow_error("attempted to coalesce more data than "

  coalesceAndReallocate(newLength, end);
  // We should have the requested length now
  DCHECK_GE(length_, maxLength);

void IOBuf::coalesceAndReallocate(size_t newHeadroom,
                                  size_t newTailroom) {
  uint64_t newCapacity = newLength + newHeadroom + newTailroom;
  if (newCapacity > UINT32_MAX) {
    throw std::overflow_error("IOBuf chain too large to coalesce");

  // Allocate space for the coalesced buffer.
  // We always convert to an external buffer, even if we happened to be an
  // internal buffer before.
  uint64_t actualCapacity;
  allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity);

  // Copy the data into the new buffer
  uint8_t* newData = newBuf + newHeadroom;
  uint8_t* p = newData;
  IOBuf* current = this;
  size_t remaining = newLength;
    assert(current->length_ <= remaining);
    remaining -= current->length_;
    memcpy(p, current->data_, current->length_);
    p += current->length_;
    current = current->next_;
  } while (current != end);
  assert(remaining == 0);

  // Point at the new buffer

  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared.
  setFlagsAndSharedInfo(0, newInfo);
  capacity_ = actualCapacity;

    // Separate from the rest of our chain.
    // Since we don't store the unique_ptr returned by separateChain(),
    // this will immediately delete the returned subchain.
    (void)separateChain(next_, current->prev_);
void IOBuf::decrementRefcount() {
  // Externally owned buffers don't have a SharedInfo object and aren't managed
  // by the reference count
  SharedInfo* info = sharedInfo();

  // Decrement the refcount
  uint32_t newcnt = info->refcount.fetch_sub(
      1, std::memory_order_acq_rel);
  // Note that fetch_sub() returns the value before we decremented.
  // If it is 1, we were the only remaining user; if it is greater there are
  // still other users.

  // We were the last user.  Free the buffer

  // Free the SharedInfo if it was allocated separately.
  //
  // This is only used by takeOwnership().
  //
  // To avoid this special case handling in decrementRefcount(), we could have
  // takeOwnership() set a custom freeFn() that calls the user's free function
  // then frees the SharedInfo object.  (This would require that
  // takeOwnership() store the user's free function with its allocated
  // SharedInfo object.)  However, handling this specially with a flag seems
  // like it shouldn't be problematic.
  if (flags() & kFlagFreeSharedInfo) {

void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) {
  size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;
  DCHECK_LT(newCapacity, UINT32_MAX);

  // reserveSlow() is dangerous if anyone else is sharing the buffer, as we may
  // reallocate and free the original buffer.  It should only ever be called if
  // we are the only user of the buffer.
  DCHECK(!isSharedOne());

  // We'll need to reallocate the buffer.
  // There are a few options.
  // - If we have enough total room, move the data around in the buffer
  //   and adjust the data_ pointer.
  // - If we're using an internal buffer, we'll switch to an external
  //   buffer with enough headroom and tailroom.
  // - If we have enough headroom (headroom() >= minHeadroom) but not too much
  //   (so we don't waste memory), we can try one of two things, depending on
  //   whether we use jemalloc or not:
  //   - If using jemalloc, we can try to expand in place, avoiding a memcpy()
  //   - If not using jemalloc and we don't have too much to copy,
  //     we'll use realloc() (note that realloc might have to copy
  //     headroom + data + tailroom, see smartRealloc in folly/Malloc.h)
  // - Otherwise, bite the bullet and reallocate.
  if (headroom() + tailroom() >= minHeadroom + minTailroom) {
    uint8_t* newData = writableBuffer() + minHeadroom;
    memmove(newData, data_, length_);

  size_t newAllocatedCapacity = goodExtBufferSize(newCapacity);
  uint8_t* newBuffer = nullptr;
  uint64_t newHeadroom = 0;
  uint64_t oldHeadroom = headroom();

  // If we have a buffer allocated with malloc and we just need more tailroom,
  // try to use realloc()/rallocm() to grow the buffer in place.
  SharedInfo* info = sharedInfo();
  if (info && (info->freeFn == nullptr) && length_ != 0 &&
      oldHeadroom >= minHeadroom) {
    if (usingJEMalloc()) {
      size_t headSlack = oldHeadroom - minHeadroom;
      // We assume that tailroom is more useful and more important than
      // headroom (not least because realloc / rallocm allow us to grow the
      // buffer at the tail, but not at the head).  So, if we have more
      // headroom than we need, we consider that "wasted".  We arbitrarily
      // define "too much" headroom to be 25% of the capacity.
      if (headSlack * 4 <= newCapacity) {
        size_t allocatedCapacity = capacity() + sizeof(SharedInfo);
        if (allocatedCapacity >= jemallocMinInPlaceExpandable) {
          // rallocm can write to its 2nd arg even if it returns
          // ALLOCM_ERR_NOT_MOVED.  So, we pass a temporary to its 2nd arg and
          // update newAllocatedCapacity only on success.
          size_t allocatedSize;
          int r = rallocm(&p, &allocatedSize, newAllocatedCapacity,
          if (r == ALLOCM_SUCCESS) {
            newBuffer = static_cast<uint8_t*>(p);
            newHeadroom = oldHeadroom;
            newAllocatedCapacity = allocatedSize;
          } else if (r == ALLOCM_ERR_OOM) {
            // shouldn't happen as we don't actually allocate new memory
            // (due to ALLOCM_NO_MOVE)
            throw std::bad_alloc();
          // if ALLOCM_ERR_NOT_MOVED, do nothing, fall back to
          // malloc/memcpy/free
    } else {  // Not using jemalloc
      size_t copySlack = capacity() - length_;
      if (copySlack * 2 <= length_) {
        void* p = realloc(buf_, newAllocatedCapacity);
        if (UNLIKELY(p == nullptr)) {
          throw std::bad_alloc();
        newBuffer = static_cast<uint8_t*>(p);
        newHeadroom = oldHeadroom;

  // None of the previous reallocation strategies worked (or we're using
  // an internal buffer).  malloc/copy/free.
  if (newBuffer == nullptr) {
    void* p = malloc(newAllocatedCapacity);
    if (UNLIKELY(p == nullptr)) {
      throw std::bad_alloc();
    newBuffer = static_cast<uint8_t*>(p);
    memcpy(newBuffer + minHeadroom, data_, length_);
    newHeadroom = minHeadroom;

  initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap);
  if (flags() & kFlagFreeSharedInfo) {
  setFlagsAndSharedInfo(0, info);

  data_ = newBuffer + newHeadroom;
  // length_ is unchanged
void IOBuf::freeExtBuffer() {
  SharedInfo* info = sharedInfo();

    info->freeFn(buf_, info->userData);
      // The user's free function should never throw.  Otherwise we might
      // throw from the IOBuf destructor.  Other code paths like coalesce()
      // also assume that decrementRefcount() cannot throw.

void IOBuf::allocExtBuffer(uint64_t minCapacity,
                           SharedInfo** infoReturn,
                           uint64_t* capacityReturn) {
  size_t mallocSize = goodExtBufferSize(minCapacity);
  uint8_t* buf = static_cast<uint8_t*>(malloc(mallocSize));
  if (UNLIKELY(buf == nullptr)) {
    throw std::bad_alloc();
  initExtBuffer(buf, mallocSize, infoReturn, capacityReturn);

size_t IOBuf::goodExtBufferSize(uint64_t minCapacity) {
  // Determine how much space we should allocate.  We'll store the SharedInfo
  // for the external buffer just after the buffer itself.  (We store it just
  // after the buffer rather than just before so that the code can still just
  // use free(buf_) to free the buffer.)
  size_t minSize = static_cast<size_t>(minCapacity) + sizeof(SharedInfo);
  // Add room for padding so that the SharedInfo will be aligned on an 8-byte
  // boundary.
  minSize = (minSize + 7) & ~7;

  // Use goodMallocSize() to bump up the capacity to a decent size to request
  // from malloc, so we can use all of the space that malloc will probably
  // give us anyway.
  return goodMallocSize(minSize);
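
  // A worked example (editorial; the exact numbers depend on
  // sizeof(SharedInfo) and on the allocator's size classes): assuming a
  // 24-byte SharedInfo, minCapacity = 100 gives minSize = 124, the alignment
  // round-up makes it 128, and goodMallocSize(128) returns the malloc size
  // class actually requested; everything before the trailing SharedInfo then
  // becomes usable buffer capacity.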
void IOBuf::initExtBuffer(uint8_t* buf, size_t mallocSize,
                          SharedInfo** infoReturn,
                          uint64_t* capacityReturn) {
  // Find the SharedInfo storage at the end of the buffer
  // and construct the SharedInfo.
  uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
  SharedInfo* sharedInfo = new(infoStart) SharedInfo;

  *capacityReturn = infoStart - buf;
  *infoReturn = sharedInfo;

fbstring IOBuf::moveToFbString() {
  // malloc-allocated buffers are just fine, everything else needs
  // to be turned into one.
  if (!sharedInfo() ||         // user owned, not ours to give up
      sharedInfo()->freeFn ||  // not malloc()-ed
      headroom() != 0 ||       // malloc()-ed block doesn't start at beginning
      tailroom() == 0 ||       // no room for NUL terminator
      isShared() ||            // shared
      isChained()) {           // chained
    // We might as well get rid of all head and tailroom if we're going
    // to reallocate; we need 1 byte for NUL terminator.
    coalesceAndReallocate(0, computeChainDataLength(), this, 1);

  // Ensure NUL terminated
  fbstring str(reinterpret_cast<char*>(writableData()),
               length(), capacity(),
               AcquireMallocatedString());

  if (flags() & kFlagFreeSharedInfo) {

  // Reset to a state where we can be deleted cleanly
  flagsAndSharedInfo_ = 0;

IOBuf::Iterator IOBuf::cbegin() const {
  return Iterator(this, this);

IOBuf::Iterator IOBuf::cend() const {
  return Iterator(nullptr, nullptr);

folly::fbvector<struct iovec> IOBuf::getIov() const {
  folly::fbvector<struct iovec> iov;
  iov.reserve(countChainElements());
  IOBuf const* p = this;
    // some code can get confused by empty iovs, so skip them
    if (p->length() > 0) {
      iov.push_back({(void*)p->data(), folly::to<size_t>(p->length())});