block: Add bio_advance()
author Kent Overstreet <koverstreet@google.com>
Fri, 28 Sep 2012 20:17:55 +0000 (13:17 -0700)
committer Kent Overstreet <koverstreet@google.com>
Sat, 23 Mar 2013 21:15:27 +0000 (14:15 -0700)
This is prep work for immutable bio vecs; we first want to centralize
where bvecs are modified.

The next two patches convert some existing code to use this function.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
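
For illustration only (not part of the patch): a minimal sketch of the kind of caller the following patches convert to bio_advance(). The helper complete_partial() and its error argument are hypothetical; only bio_advance() and bio_endio() come from the kernel API shown in this diff.

/*
 * Hypothetical caller: account for the first @done bytes of @bio and
 * leave it describing only the remaining I/O.  Without bio_advance(),
 * each such site open-codes the bi_sector/bi_size/bi_idx (and
 * partial-bvec) bookkeeping itself.
 */
static void complete_partial(struct bio *bio, unsigned done, int error)
{
	bio_advance(bio, done);

	if (!bio->bi_size)
		bio_endio(bio, error);	/* nothing left outstanding */
}
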
fs/bio.c
include/linux/bio.h
include/linux/blk_types.h

index 40aa96eae99ffd7ddce088ccb875b16e0b0cfcdb..7edc08d2246cf03548cce3b8b72d863d4d16269f 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -752,6 +752,47 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 }
 EXPORT_SYMBOL(bio_add_page);
 
+/**
+ * bio_advance - increment/complete a bio by some number of bytes
+ * @bio:       bio to advance
+ * @bytes:     number of bytes to complete
+ *
+ * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
+ * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
+ * be updated on the last bvec as well.
+ *
+ * @bio will then represent the remaining, uncompleted portion of the io.
+ */
+void bio_advance(struct bio *bio, unsigned bytes)
+{
+       if (bio_integrity(bio))
+               bio_integrity_advance(bio, bytes);
+
+       bio->bi_sector += bytes >> 9;
+       bio->bi_size -= bytes;
+
+       if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+               return;
+
+       while (bytes) {
+               if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
+                       WARN_ONCE(1, "bio idx %d >= vcnt %d\n",
+                                 bio->bi_idx, bio->bi_vcnt);
+                       break;
+               }
+
+               if (bytes >= bio_iovec(bio)->bv_len) {
+                       bytes -= bio_iovec(bio)->bv_len;
+                       bio->bi_idx++;
+               } else {
+                       bio_iovec(bio)->bv_len -= bytes;
+                       bio_iovec(bio)->bv_offset += bytes;
+                       bytes = 0;
+               }
+       }
+}
+EXPORT_SYMBOL(bio_advance);
+
 struct bio_map_data {
        struct bio_vec *iovecs;
        struct sg_iovec *sgvecs;
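
A worked example of the new loop, purely illustrative and not part of the patch: assuming a bio built from two 4096-byte bvecs, an advance that does not land on a bvec boundary updates both the index and the final bvec.

/* Assume bio->bi_vcnt == 2, each bvec 4096 bytes,
 * bio->bi_idx == 0 and bio->bi_size == 8192.
 */
bio_advance(bio, 6144);

/* Afterwards:
 *   bio->bi_sector has advanced by 12 sectors (6144 >> 9)
 *   bio->bi_size  == 2048
 *   bio->bi_idx   == 1                 (first bvec fully consumed)
 *   bvec 1:           bv_offset += 2048, bv_len == 2048
 * i.e. the bio now describes only the last 2 KiB of the original I/O.
 */
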
index 669b1cb18fee43b31f260ca822a3af9e8b25717b..fcb4dba2d8ea95b55300b69aec0e8a93003c2b02 100644 (file)
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -248,6 +248,8 @@ extern void bio_endio(struct bio *, int);
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
 
+extern void bio_advance(struct bio *, unsigned);
+
 extern void bio_init(struct bio *);
 extern void bio_reset(struct bio *);
 
index cdf11191e6450fa680b7a8b71352c3642633b9fb..c178d25e588b85df5881a16fd4c5731cffa29391 100644 (file)
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -197,6 +197,8 @@ enum rq_flag_bits {
         REQ_SECURE)
 #define REQ_CLONE_MASK         REQ_COMMON_MASK
 
+#define BIO_NO_ADVANCE_ITER_MASK       (REQ_DISCARD|REQ_WRITE_SAME)
+
 /* This mask is used for both bio and request merge checking */
 #define REQ_NOMERGE_FLAGS \
        (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
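
Why the new mask (a note, not part of the patch): for REQ_DISCARD and REQ_WRITE_SAME bios, bi_size does not correspond byte-for-byte to data held in bi_io_vec, so bio_advance() only adjusts bi_sector and bi_size for them and must not step through the bvec array.

/* Sketch of the check this mask drives in bio_advance() above:
 * discard/write-same payloads are not sized like bi_size, so walking
 * bi_idx by completed bytes would run off the end of bi_io_vec.
 */
if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
	return;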