/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H
#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>

#ifdef CONFIG_BLOCK
#define BIO_BUG_ON	BUG_ON

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)
/*
 * was unsigned short, but we might as well be ready for > 64kB I/O pages
 */
struct bio_vec {
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};
struct bio_set;
struct bio;
struct bio_integrity_payload;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);
/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	sector_t		bi_sector;	/* device address in 512 byte
						   sectors */
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	unsigned long		bi_flags;	/* status, command, etc */
	unsigned long		bi_rw;		/* bottom bits READ/WRITE,
						 * top bits priority
						 */
	unsigned short		bi_vcnt;	/* how many bio_vec's */
	unsigned short		bi_idx;		/* current index into bvl_vec */

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned short		bi_phys_segments;

	unsigned int		bi_size;	/* residual I/O count */
	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	bio_end_io_t		*bi_end_io;
	atomic_t		bi_cnt;		/* pin count */

	void			*bi_private;	/* owner-private data */
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity;	/* data integrity */
#endif

	bio_destructor_t	*bi_destructor;	/* destructor */
};
/*
 * bio flags
 */
#define BIO_UPTODATE	0	/* ok after I/O completion */
#define BIO_RW_BLOCK	1	/* RW_AHEAD set, and read/write would block */
#define BIO_EOF		2	/* out-of-bounds error */
#define BIO_SEG_VALID	3	/* bi_phys_segments valid */
#define BIO_CLONED	4	/* doesn't own data */
#define BIO_BOUNCED	5	/* bio is a bounce bio */
#define BIO_USER_MAPPED	6	/* contains user pages */
#define BIO_EOPNOTSUPP	7	/* not supported */
#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
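
/*
 * Example (illustrative sketch; the handler name my_end_io is hypothetical):
 * a completion callback typically tests BIO_UPTODATE via bio_flagged()
 * before dropping its reference:
 *
 *	static void my_end_io(struct bio *bio, int error)
 *	{
 *		if (!bio_flagged(bio, BIO_UPTODATE))
 *			printk(KERN_ERR "bio failed: %d\n", error);
 *		bio_put(bio);
 *	}
 */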
/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS		(4)
#define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)
/*
 * bio bi_rw flags
 *
 * bit 0 -- read (not set) or write (set)
 * bit 1 -- rw-ahead when set
 * bit 2 -- barrier
 * bit 3 -- fail fast, don't want low level driver retries
 * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
 * bit 5 -- metadata request
 * bit 6 -- discard sectors
 */
#define BIO_RW		0	/* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD	1	/* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER	2
#define BIO_RW_FAILFAST	3
#define BIO_RW_SYNC	4
#define BIO_RW_META	5
#define BIO_RW_DISCARD	6
/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {				\
	WARN_ON(prio >= (1 << IOPRIO_BITS));				\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);			\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)
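
/*
 * Example (illustrative sketch): stamping a bio with a best-effort class
 * priority before submission, then sanity-checking the stored value:
 *
 *	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
 *	WARN_ON(!bio_prio_valid(bio));
 */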
/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)
#define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
#define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
#define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
#define bio_rw_meta(bio)	((bio)->bi_rw & (1 << BIO_RW_META))
#define bio_discard(bio)	((bio)->bi_rw & (1 << BIO_RW_DISCARD))
#define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
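
/*
 * Example: these accessors compose. For an in-flight bio, the current
 * vector entry and the residual byte count can be read as:
 *
 *	struct bio_vec *bvec = bio_iovec(bio);
 *	unsigned int bytes_left = bio_sectors(bio) << 9;
 */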
static inline unsigned int bio_cur_sectors(struct bio *bio)
{
	if (bio->bi_vcnt)
		return bio_iovec(bio)->bv_len >> 9;
	else /* dataless requests such as discard */
		return bio->bi_size >> 9;
}
static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}
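
/*
 * Example (illustrative sketch; buffer is an assumed caller-provided
 * destination): for bios over lowmem pages, bio_data() yields a directly
 * usable kernel virtual address:
 *
 *	char *p = bio_data(bio);
 *	if (p)
 *		memcpy(buffer, p, bio_cur_sectors(bio) << 9);
 *
 * Highmem pages must go through bvec_kmap_irq()/__bio_kmap_atomic() instead.
 */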
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For a
 * permanent PIO fallback, the user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for an example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +	\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)
#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)
/*
 * allow arch override, e.g. for virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
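
/*
 * Worked example: two bvecs are physically mergeable when the first ends
 * exactly where the second begins. With 4kB pages, {page P, offset 0,
 * len 4096} followed by the physically adjacent page {P+1, offset 0, ...}
 * satisfies BIOVEC_PHYS_MERGEABLE; any gap, overlap, or physical
 * discontiguity fails the equality. BIOVEC_SEG_BOUNDARY additionally
 * rejects pairs whose combined span would cross the queue's
 * seg_boundary_mask.
 */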
#define bio_io_error(bio) bio_endio((bio), -EIO)
/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)	\
	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
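
/*
 * Example (illustrative sketch, roughly what zero_fill_bio() does):
 * walking the pending segments and clearing each page fragment:
 *
 *	struct bio_vec *bvec;
 *	unsigned long flags;
 *	int i;
 *
 *	bio_for_each_segment(bvec, bio, i) {
 *		char *data = bvec_kmap_irq(bvec, &flags);
 *		memset(data, 0, bvec->bv_len);
 *		bvec_kunmap_irq(data, &flags);
 *	}
 */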
/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 *	bio_get(bio);
 *	submit_bio(rw, bio);
 *	if (bio->bi_flags ...)
 *		do_something
 *	bio_put(bio);
 *
 * without the bio_get(), it could potentially complete I/O before submit_bio
 * returns, and then the bio would be freed memory by the time the
 * if (bio->bi_flags ...) check runs.
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)
#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */
	struct bio_vec		*bip_vec;	/* integrity data vector */

	sector_t		bip_sector;	/* virtual start sector */

	void			*bip_buf;	/* generated integrity data */
	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	int			bip_error;	/* saved I/O error */
	unsigned int		bip_size;

	unsigned short		bip_pool;	/* pool the ivec came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_idx;	/* current bip_vec index */

	struct work_struct	bip_work;	/* I/O completion */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */
/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary.
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 * in bio2.bi_private
 */
struct bio_pair {
	struct bio			bio1, bio2;
	struct bio_vec			bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload	bip1, bip2;
	struct bio_vec			iv1, iv2;
#endif
};
extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
				  int first_sectors);
extern mempool_t *bio_split_pool;
extern void bio_pair_release(struct bio_pair *dbio);
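
/*
 * Example (illustrative sketch; split_sectors is assumed to be the length
 * of the first chunk): splitting a single-page bio at a device boundary
 * and submitting both halves, as stacking drivers do:
 *
 *	struct bio_pair *bp = bio_split(bio, bio_split_pool, split_sectors);
 *
 *	if (bp) {
 *		generic_make_request(&bp->bio1);
 *		generic_make_request(&bp->bio2);
 *		bio_pair_release(bp);
 *	}
 */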
extern struct bio_set *bioset_create(int, int);
extern void bioset_free(struct bio_set *);

extern struct bio *bio_alloc(gfp_t, int);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);

extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, gfp_t);

extern void bio_init(struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,
			unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
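
/*
 * Example (illustrative sketch; bdev, page, sector and my_end_io are
 * assumed to be provided by the caller): building a one-page read and
 * submitting it:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
 *		bio_put(bio);
 *		return -EIO;
 *	}
 *	submit_bio(READ, bio);
 */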
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int);
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, unsigned long,
				 unsigned int, int);
extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *,
				     int, int);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *,
				     struct bio_set *);
extern unsigned int bvec_nr_vecs(unsigned short idx);
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
struct bio_set {
	mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
#endif
	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

extern struct bio_set *fs_bio_set;
/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2
#ifdef CONFIG_HIGHMEM
/*
 * remember to add offset! and never ever reenable interrupts between a
 * bvec_kmap_irq and bvec_kunmap_irq!!
 *
 * This function MUST be inlined - it plays with the CPU interrupt flags.
 */
static __always_inline char *bvec_kmap_irq(struct bio_vec *bvec,
					   unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}
static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}
#else
#define bvec_kmap_irq(bvec, flags)	(page_address((bvec)->bv_page) + (bvec)->bv_offset)
#define bvec_kunmap_irq(buf, flags)	do { *(flags) = 0; } while (0)
#endif
static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)
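
/*
 * Example (illustrative sketch; dst is an assumed caller-provided buffer):
 * a PIO-style driver copying the current segment out of a possibly-highmem
 * bio. Interrupts stay disabled between map and unmap, so keep the copy
 * short and never sleep in between:
 *
 *	unsigned long flags;
 *	char *buf = bio_kmap_irq(bio, &flags);
 *
 *	memcpy(dst, buf, bio_cur_sectors(bio) << 9);
 *	bio_kunmap_irq(buf, &flags);
 */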
/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline int bio_has_data(struct bio *bio)
{
	return bio && bio->bi_io_vec != NULL;
}
#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
#define bip_vec(bip)		bip_vec_idx(bip, 0)

#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
	     i < (bip)->bip_vcnt;					\
	     bvl++, i++)

#define bip_for_each_vec(bvl, bip, i)	\
	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
static inline int bio_integrity(struct bio *bio)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	return bio->bi_integrity != NULL;
#else
	return 0;
#endif
}
extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *, struct bio_set *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, struct bio_set *);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init_slab(void);
#else /* CONFIG_BLK_DEV_INTEGRITY */

#define bio_integrity(a)		(0)
#define bioset_integrity_create(a, b)	(0)
#define bio_integrity_prep(a)		(0)
#define bio_integrity_enabled(a)	(0)
#define bio_integrity_clone(a, b, c)	(0)
#define bioset_integrity_free(a)	do { } while (0)
#define bio_integrity_free(a, b)	do { } while (0)
#define bio_integrity_endio(a, b)	do { } while (0)
#define bio_integrity_advance(a, b)	do { } while (0)
#define bio_integrity_trim(a, b, c)	do { } while (0)
#define bio_integrity_split(a, b, c)	do { } while (0)
#define bio_integrity_set_tag(a, b, c)	do { } while (0)
#define bio_integrity_get_tag(a, b, c)	do { } while (0)
#define bio_integrity_init_slab(a)	do { } while (0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */
#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */