/*
 * Copyright 2014 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FOLLY_IO_IOBUF_QUEUE_H
#define FOLLY_IO_IOBUF_QUEUE_H

#include <folly/io/IOBuf.h>

#include <limits>
#include <stdexcept>
#include <string>
#include <utility>

namespace folly {

/**
 * An IOBufQueue encapsulates a chain of IOBufs and provides
 * convenience functions to append data to the back of the chain
 * and remove data from the front.
 *
 * You may also prepend data into the headroom of the first buffer in the
 * chain, if any.
 */
class IOBufQueue {
 public:
  struct Options {
    Options() : cacheChainLength(false) { }
    bool cacheChainLength;
  };

  /**
   * Commonly used Options, currently the only possible value other than
   * the default.
   */
  static Options cacheChainLength() {
    Options options;
    options.cacheChainLength = true;
    return options;
  }

  explicit IOBufQueue(const Options& options = Options());

  /**
   * Return a space to prepend bytes and the amount of headroom available.
   */
  std::pair<void*, uint64_t> headroom();

  /**
   * Indicate that n bytes from the headroom have been used.
   */
  void markPrepended(uint64_t n);

  /**
   * Prepend an existing range; throws std::overflow_error if not enough
   * room.
   */
  void prepend(const void* buf, uint64_t n);
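
  // Illustrative sketch (not part of the original header): prepending a
  // 4-byte length header in front of a payload whose head buffer was created
  // with headroom reserved.  `payload` and `payloadLen` are hypothetical.
  //
  //   IOBufQueue queue;
  //   auto buf = folly::IOBuf::create(payloadLen + 4);
  //   buf->advance(4);                          // reserve 4 bytes of headroom
  //   memcpy(buf->writableData(), payload, payloadLen);
  //   buf->append(payloadLen);
  //   queue.append(std::move(buf));
  //   uint32_t header = uint32_t(payloadLen);
  //   queue.prepend(&header, sizeof(header));   // fits in the reserved headroom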

  /**
   * Add a buffer or buffer chain to the end of this queue. The
   * queue takes ownership of buf.
   *
   * If pack is true, we try to reduce wastage at the end of this queue
   * by copying some data from the first buffers in the buf chain (and
   * releasing the buffers), if possible.  If pack is false, we leave
   * the chain topology unchanged.
   */
  void append(std::unique_ptr<folly::IOBuf>&& buf,
              bool pack=false);

  /**
   * Add a queue to the end of this queue. The queue takes ownership of
   * all buffers from the other queue.
   */
  void append(IOBufQueue& other, bool pack=false);
  void append(IOBufQueue&& other, bool pack=false) {
    append(other, pack);  // call lvalue reference overload, above
  }
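
  // Illustrative sketch (not part of the original header): handing an owned
  // IOBuf chain to the queue, then draining a second queue into it.
  // `makeResponseChain()` is a hypothetical function returning a
  // std::unique_ptr<folly::IOBuf>.
  //
  //   IOBufQueue queue;
  //   queue.append(makeResponseChain());   // queue now owns the chain
  //
  //   IOBufQueue trailer;
  //   trailer.append("\r\n", 2);
  //   queue.append(trailer, true);         // pack=true may copy small buffers
  //                                        // into the tail to reduce wastage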

  /**
   * Copy len bytes, starting at buf, to the end of this queue.
   * The caller retains ownership of the source data.
   */
  void append(const void* buf, size_t len);

  /**
   * Copy a string to the end of this queue.
   * The caller retains ownership of the source data.
   */
  void append(StringPiece sp) {
    append(sp.data(), sp.size());
  }

  /**
   * Append a chain of IOBuf objects that point to consecutive regions
   * within buf.
   *
   * Just like IOBuf::wrapBuffer, this should only be used when the caller
   * knows ahead of time and can ensure that all IOBuf objects that will point
   * to this buffer will be destroyed before the buffer itself is destroyed;
   * all other caveats from wrapBuffer also apply.
   *
   * Every buffer except for the last will wrap exactly blockSize bytes.
   * Importantly, this method may be used to wrap buffers larger than 4GB.
   */
  void wrapBuffer(const void* buf, size_t len,
                  uint64_t blockSize=(1U << 31));  // default block size: 2GB
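
  // Illustrative sketch (not part of the original header): wrapping a large,
  // externally owned memory-mapped region without copying it.  `mappedBase`
  // and `mappedLen` are hypothetical, and the mapping must outlive every
  // IOBuf created here.
  //
  //   IOBufQueue queue;
  //   queue.wrapBuffer(mappedBase, mappedLen);  // zero-copy; split into
  //                                             // 2GB blocks as needed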

  /**
   * Obtain a writable block of contiguous bytes at the end of this
   * queue, allocating more space if necessary.  The amount of space
   * reserved will be at least min.  If min contiguous space is not
   * available at the end of the queue, an IOBuf with size newAllocationSize
   * is appended to the chain and returned.  The actual available space
   * may be larger than newAllocationSize, but will be truncated to max,
   * if specified by the caller.
   *
   * If the caller subsequently writes anything into the returned space,
   * it must call the postallocate() method.
   *
   * @return The starting address of the block and the length in bytes.
   *
   * @note The point of the preallocate()/postallocate() mechanism is
   *       to support I/O APIs such as Thrift's TAsyncSocket::ReadCallback
   *       that request a buffer from the application and then, in a later
   *       callback, tell the application how much of the buffer they've
   *       used.
   */
  std::pair<void*,uint64_t> preallocate(
      uint64_t min, uint64_t newAllocationSize,
      uint64_t max = std::numeric_limits<uint64_t>::max()) {
    auto buf = tailBuf();
    if (LIKELY(buf && buf->tailroom() >= min)) {
      return std::make_pair(buf->writableTail(),
                            std::min(max, buf->tailroom()));
    }

    return preallocateSlow(min, newAllocationSize, max);
  }

  /**
   * Tell the queue that the caller has written data into the first n
   * bytes provided by the previous preallocate() call.
   *
   * @note n should be less than or equal to the size returned by
   *       preallocate().  If n is zero, the caller may skip the call
   *       to postallocate().  If n is nonzero, the caller must not
   *       invoke any other non-const methods on this IOBufQueue between
   *       the call to preallocate and the call to postallocate().
   */
  void postallocate(uint64_t n) {
    head_->prev()->append(n);
    chainLength_ += n;
  }
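
  // Illustrative sketch (not part of the original header) of the
  // preallocate()/postallocate() pattern around a plain POSIX read();
  // `fd` is a hypothetical file descriptor.
  //
  //   IOBufQueue queue;
  //   auto space = queue.preallocate(64, 4096);   // >= 64 contiguous bytes
  //   ssize_t n = ::read(fd, space.first, space.second);
  //   if (n > 0) {
  //     queue.postallocate(n);                    // commit what was written
  //   }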

  /**
   * Obtain a writable block of n contiguous bytes, allocating more space
   * if necessary, and mark it as used.  The caller can fill it later.
   */
  void* allocate(uint64_t n) {
    void* p = preallocate(n, n).first;
    postallocate(n);
    return p;
  }
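
  // Illustrative sketch (not part of the original header), assuming the queue
  // was constructed with IOBufQueue::cacheChainLength(); `appendBody()` is a
  // hypothetical helper that appends a message body to the queue.
  //
  //   void* lenSlot = queue.allocate(sizeof(uint32_t));  // reserved, filled later
  //   size_t before = queue.chainLength();
  //   appendBody(queue);
  //   uint32_t bodyLen = uint32_t(queue.chainLength() - before);
  //   memcpy(lenSlot, &bodyLen, sizeof(bodyLen));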

  void* writableTail() const {
    auto buf = tailBuf();
    return buf ? buf->writableTail() : nullptr;
  }

  size_t tailroom() const {
    auto buf = tailBuf();
    return buf ? buf->tailroom() : 0;
  }

  /**
   * Split off the first n bytes of the queue into a separate IOBuf chain,
   * and transfer ownership of the new chain to the caller.  The IOBufQueue
   * retains ownership of everything after the split point.
   *
   * @warning If the split point lies in the middle of some IOBuf within
   *          the chain, this function may, as an implementation detail,
   *          clone that IOBuf.
   *
   * @throws std::underflow_error if n exceeds the number of bytes
   *         in the queue.
   */
  std::unique_ptr<folly::IOBuf> split(size_t n);
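
  // Illustrative sketch (not part of the original header): carving one
  // complete, length-prefixed frame off the front of the queue.  Assumes the
  // queue was constructed with IOBufQueue::cacheChainLength() and that
  // `frameLen` was already parsed; `processFrame()` is a hypothetical consumer.
  //
  //   if (queue.chainLength() >= frameLen) {
  //     std::unique_ptr<folly::IOBuf> frame = queue.split(frameLen);
  //     processFrame(std::move(frame));
  //   }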

  /**
   * Similar to IOBuf::trimStart, but works on the whole queue.  Will
   * pop off buffers that have been completely trimmed.
   */
  void trimStart(size_t amount);

  /**
   * Similar to IOBuf::trimEnd, but works on the whole queue.  Will
   * pop off buffers that have been completely trimmed.
   */
  void trimEnd(size_t amount);
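
  // Illustrative sketch (not part of the original header): discarding a 4-byte
  // header from the front and a 2-byte checksum from the back, assuming the
  // queue holds at least 6 bytes.
  //
  //   queue.trimStart(4);
  //   queue.trimEnd(2);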

  /**
   * Transfer ownership of the queue's entire IOBuf chain to the caller.
   */
  std::unique_ptr<folly::IOBuf> move() {
    chainLength_ = 0;
    return std::move(head_);
  }
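
  // Illustrative sketch (not part of the original header): draining the queue
  // and getting a single contiguous view of everything that was accumulated.
  // `handleBytes()` is a hypothetical consumer.
  //
  //   std::unique_ptr<folly::IOBuf> chain = queue.move();  // queue is now empty
  //   if (chain) {
  //     folly::ByteRange flat = chain->coalesce();  // copies only if chained
  //     handleBytes(flat);
  //   }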

  /**
   * Access the front IOBuf; the IOBufQueue retains ownership.
   */
  const folly::IOBuf* front() const {
    return head_.get();
  }

  /**
   * Returns the first IOBuf in the chain and removes it from the chain.
   *
   * @return first IOBuf in the chain or nullptr if none.
   */
  std::unique_ptr<folly::IOBuf> pop_front();

  /**
   * Total chain length, only valid if cacheChainLength was specified in the
   * constructor.
   */
  size_t chainLength() const {
    if (UNLIKELY(!options_.cacheChainLength)) {
      throw std::invalid_argument("IOBufQueue: chain length not cached");
    }
    return chainLength_;
  }
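
  // Illustrative sketch (not part of the original header): opting into cached
  // length bookkeeping so chainLength() can be called cheaply.
  //
  //   IOBufQueue queue(IOBufQueue::cacheChainLength());
  //   queue.append("hello", 5);
  //   size_t len = queue.chainLength();  // 5; would throw without the option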

  /**
   * Returns true iff the IOBuf chain length is 0.
   */
  bool empty() const {
    return !head_ || head_->empty();
  }

  const Options& options() const {
    return options_;
  }

  /**
   * Clear the queue.  Note that this does not release the buffers, it
   * just sets their length to zero; useful if you want to reuse the
   * same queue without reallocating.
   */
  void clear();

  /**
   * Append the queue to a std::string. Non-destructive.
   */
  void appendToString(std::string& out) const;

  IOBufQueue(IOBufQueue&&) noexcept;
  IOBufQueue& operator=(IOBufQueue&&);

 private:
  IOBuf* tailBuf() const {
    if (UNLIKELY(!head_)) return nullptr;
    IOBuf* buf = head_->prev();
    return LIKELY(!buf->isSharedOne()) ? buf : nullptr;
  }
  std::pair<void*,uint64_t> preallocateSlow(
      uint64_t min, uint64_t newAllocationSize, uint64_t max);

  static const size_t kChainLengthNotCached = (size_t)-1;

  IOBufQueue(const IOBufQueue&) = delete;
  IOBufQueue& operator=(const IOBufQueue&) = delete;

  Options options_;

  // NOTE that chainLength_ is still updated even if !options_.cacheChainLength
  // because doing it unchecked in postallocate() is faster (no (mis)predicted
  // branch)
  size_t chainLength_;
  /** Everything that has been appended but not yet discarded or moved out */
  std::unique_ptr<folly::IOBuf> head_;
};

}  // namespace folly

#endif // FOLLY_IO_IOBUF_QUEUE_H