/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <cstdint>
#include <limits>
#include <memory>
#include <stdexcept>
#include <string>
#include <utility>

#include <folly/io/IOBuf.h>
/**
 * An IOBufQueue encapsulates a chain of IOBufs and provides
 * convenience functions to append data to the back of the chain
 * and remove data from the front.
 *
 * You may also prepend data into the headroom of the first buffer in the
 * chain, if any.
 */
/** Behavior knobs for an IOBufQueue; see cacheChainLength(). */
struct Options {
  Options() : cacheChainLength(false) {}
  // When true, the queue keeps a running byte count so chainLength()
  // is O(1) instead of throwing.
  bool cacheChainLength;
};

/**
 * Commonly used Options, currently the only possible value other than
 * the default.
 */
static Options cacheChainLength() {
  Options options;
  options.cacheChainLength = true;
  return options;
}
51 explicit IOBufQueue(const Options& options = Options());
54 * Return a space to prepend bytes and the amount of headroom available.
56 std::pair<void*, uint64_t> headroom();
59 * Indicate that n bytes from the headroom have been used.
61 void markPrepended(uint64_t n);
64 * Prepend an existing range; throws std::overflow_error if not enough
67 void prepend(const void* buf, uint64_t n);
70 * Add a buffer or buffer chain to the end of this queue. The
71 * queue takes ownership of buf.
73 * If pack is true, we try to reduce wastage at the end of this queue
74 * by copying some data from the first buffers in the buf chain (and
75 * releasing the buffers), if possible. If pack is false, we leave
76 * the chain topology unchanged.
78 void append(std::unique_ptr<folly::IOBuf>&& buf,
82 * Add a queue to the end of this queue. The queue takes ownership of
83 * all buffers from the other queue.
85 void append(IOBufQueue& other, bool pack=false);
86 void append(IOBufQueue&& other, bool pack=false) {
87 append(other, pack); // call lvalue reference overload, above
91 * Copy len bytes, starting at buf, to the end of this queue.
92 * The caller retains ownership of the source data.
94 void append(const void* buf, size_t len);
97 * Copy a string to the end of this queue.
98 * The caller retains ownership of the source data.
100 void append(StringPiece sp) {
101 append(sp.data(), sp.size());
/**
 * Append a chain of IOBuf objects that point to consecutive regions
 * within buf.
 *
 * Just like IOBuf::wrapBuffer, this should only be used when the caller
 * knows ahead of time and can ensure that all IOBuf objects that will point
 * to this buffer will be destroyed before the buffer itself is destroyed;
 * all other caveats from wrapBuffer also apply.
 *
 * Every buffer except for the last will wrap exactly blockSize bytes.
 * Importantly, this method may be used to wrap buffers larger than 4GB.
 */
void wrapBuffer(const void* buf, size_t len,
                uint64_t blockSize = (1U << 31));  // default block size: 2GB
120 * Obtain a writable block of contiguous bytes at the end of this
121 * queue, allocating more space if necessary. The amount of space
122 * reserved will be at least min. If min contiguous space is not
123 * available at the end of the queue, and IOBuf with size newAllocationSize
124 * is appended to the chain and returned. The actual available space
125 * may be larger than newAllocationSize, but will be truncated to max,
128 * If the caller subsequently writes anything into the returned space,
129 * it must call the postallocate() method.
131 * @return The starting address of the block and the length in bytes.
133 * @note The point of the preallocate()/postallocate() mechanism is
134 * to support I/O APIs such as Thrift's TAsyncSocket::ReadCallback
135 * that request a buffer from the application and then, in a later
136 * callback, tell the application how much of the buffer they've
139 std::pair<void*,uint64_t> preallocate(
140 uint64_t min, uint64_t newAllocationSize,
141 uint64_t max = std::numeric_limits<uint64_t>::max()) {
142 auto buf = tailBuf();
143 if (LIKELY(buf && buf->tailroom() >= min)) {
144 return std::make_pair(buf->writableTail(),
145 std::min(max, buf->tailroom()));
148 return preallocateSlow(min, newAllocationSize, max);
152 * Tell the queue that the caller has written data into the first n
153 * bytes provided by the previous preallocate() call.
155 * @note n should be less than or equal to the size returned by
156 * preallocate(). If n is zero, the caller may skip the call
157 * to postallocate(). If n is nonzero, the caller must not
158 * invoke any other non-const methods on this IOBufQueue between
159 * the call to preallocate and the call to postallocate().
161 void postallocate(uint64_t n) {
162 head_->prev()->append(n);
167 * Obtain a writable block of n contiguous bytes, allocating more space
168 * if necessary, and mark it as used. The caller can fill it later.
170 void* allocate(uint64_t n) {
171 void* p = preallocate(n, n).first;
176 void* writableTail() const {
177 auto buf = tailBuf();
178 return buf ? buf->writableTail() : nullptr;
181 size_t tailroom() const {
182 auto buf = tailBuf();
183 return buf ? buf->tailroom() : 0;
187 * Split off the first n bytes of the queue into a separate IOBuf chain,
188 * and transfer ownership of the new chain to the caller. The IOBufQueue
189 * retains ownership of everything after the split point.
191 * @warning If the split point lies in the middle of some IOBuf within
192 * the chain, this function may, as an implementation detail,
195 * @throws std::underflow_error if n exceeds the number of bytes
198 std::unique_ptr<folly::IOBuf> split(size_t n) {
199 return split(n, true);
203 * Similar to split, but will return the entire queue instead of throwing
204 * if n exceeds the number of bytes in the queue.
206 std::unique_ptr<folly::IOBuf> splitAtMost(size_t n) {
207 return split(n, false);
/**
 * Similar to IOBuf::trimStart, but works on the whole queue. Will
 * pop off buffers that have been completely trimmed.
 */
void trimStart(size_t amount);

/**
 * Similar to IOBuf::trimEnd, but works on the whole queue. Will
 * pop off buffers that have been completely trimmed.
 */
void trimEnd(size_t amount);
223 * Transfer ownership of the queue's entire IOBuf chain to the caller.
225 std::unique_ptr<folly::IOBuf> move() {
227 return std::move(head_);
233 const folly::IOBuf* front() const {
238 * returns the first IOBuf in the chain and removes it from the chain
240 * @return first IOBuf in the chain or nullptr if none.
242 std::unique_ptr<folly::IOBuf> pop_front();
245 * Total chain length, only valid if cacheLength was specified in the
248 size_t chainLength() const {
249 if (UNLIKELY(!options_.cacheChainLength)) {
250 throw std::invalid_argument("IOBufQueue: chain length not cached");
256 * Returns true iff the IOBuf chain length is 0.
259 return !head_ || head_->empty();
262 const Options& options() const {
267 * Clear the queue. Note that this does not release the buffers, it
268 * just sets their length to zero; useful if you want to reuse the
269 * same queue without reallocating.
274 * Append the queue to a std::string. Non-destructive.
276 void appendToString(std::string& out) const;
279 * Calls IOBuf::gather() on the head of the queue, if it exists.
281 void gather(uint64_t maxLength);
284 IOBufQueue(IOBufQueue&&) noexcept;
285 IOBufQueue& operator=(IOBufQueue&&);
288 IOBuf* tailBuf() const {
289 if (UNLIKELY(!head_)) return nullptr;
290 IOBuf* buf = head_->prev();
291 return LIKELY(!buf->isSharedOne()) ? buf : nullptr;
293 std::pair<void*,uint64_t> preallocateSlow(
294 uint64_t min, uint64_t newAllocationSize, uint64_t max);
296 std::unique_ptr<folly::IOBuf> split(size_t n, bool throwOnUnderflow);
298 static const size_t kChainLengthNotCached = (size_t)-1;
300 IOBufQueue(const IOBufQueue&) = delete;
301 IOBufQueue& operator=(const IOBufQueue&) = delete;
305 // NOTE that chainLength_ is still updated even if !options_.cacheChainLength
306 // because doing it unchecked in postallocate() is faster (no (mis)predicted
309 /** Everything that has been appended but not yet discarded or moved out */
310 std::unique_ptr<folly::IOBuf> head_;