/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>
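/*
 * The two helpers below work around old block layer limitations: a bio whose
 * bi_idx has already been advanced (as happens to the remainder of a split
 * bio) is cloned from its current segment onwards before being resubmitted,
 * and bi_max_vecs is clamped to bi_vcnt as explained in the comment inside
 * bch_generic_make_request_hack().
 */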
static void bch_bi_idx_hack_endio(struct bio *bio, int error)
{
	struct bio *p = bio->bi_private;

	bio_endio(p, error);
	bio_put(bio);
}
static void bch_generic_make_request_hack(struct bio *bio)
{
	if (bio->bi_idx) {
		struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));

		memcpy(clone->bi_io_vec,
		       bio_iovec(bio),
		       bio_segments(bio) * sizeof(struct bio_vec));

		clone->bi_sector = bio->bi_sector;
		clone->bi_bdev = bio->bi_bdev;
		clone->bi_rw = bio->bi_rw;
		clone->bi_vcnt = bio_segments(bio);
		clone->bi_size = bio->bi_size;

		clone->bi_private = bio;
		clone->bi_end_io = bch_bi_idx_hack_endio;

		bio = clone;
	}

	/*
	 * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
	 * bios might have had more than that (before we split them per device
	 * limitations).
	 *
	 * To be taken out once immutable bvec stuff is in.
	 */
	bio->bi_max_vecs = bio->bi_vcnt;

	generic_make_request(bio);
}
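/*
 * bch_bio_split() below handles three cases: REQ_DISCARD bios (no data, a
 * single bvec split bio suffices), splits that fall on a bvec boundary
 * (whole bvecs are copied), and splits in the middle of a bvec (the last
 * copied bvec is shortened and the remainder's first bvec is advanced by
 * the same amount).
 */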
/**
 * bch_bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
 * unchanged.
 *
 * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
 * bvec boundary; it is the caller's responsibility to ensure that @bio is not
 * freed before the split.
 */
struct bio *bch_bio_split(struct bio *bio, int sectors,
			  gfp_t gfp, struct bio_set *bs)
{
	unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
	struct bio_vec *bv;
	struct bio *ret = NULL;

	BUG_ON(sectors <= 0);

	if (sectors >= bio_sectors(bio))
		return bio;

	if (bio->bi_rw & REQ_DISCARD) {
		ret = bio_alloc_bioset(gfp, 1, bs);
		if (!ret)
			return NULL;
		idx = 0;
		goto out;
	}

	bio_for_each_segment(bv, bio, idx) {
		vcnt = idx - bio->bi_idx;

		if (!nbytes) {
			ret = bio_alloc_bioset(gfp, vcnt, bs);
			if (!ret)
				return NULL;
			memcpy(ret->bi_io_vec, bio_iovec(bio),
			       sizeof(struct bio_vec) * vcnt);
			break;
		} else if (nbytes < bv->bv_len) {
			ret = bio_alloc_bioset(gfp, ++vcnt, bs);
			if (!ret)
				return NULL;
			memcpy(ret->bi_io_vec, bio_iovec(bio),
			       sizeof(struct bio_vec) * vcnt);
			ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
			bv->bv_offset += nbytes;
			bv->bv_len -= nbytes;
			break;
		}

		nbytes -= bv->bv_len;
	}
out:
	ret->bi_bdev = bio->bi_bdev;
	ret->bi_sector = bio->bi_sector;
	ret->bi_size = sectors << 9;
	ret->bi_rw = bio->bi_rw;
	ret->bi_vcnt = vcnt;
	ret->bi_max_vecs = vcnt;

	bio->bi_sector += sectors;
	bio->bi_size -= sectors << 9;
	bio->bi_idx = idx;

	if (bio_integrity(bio)) {
		if (bio_integrity_clone(ret, bio, gfp)) {
			bio_put(ret);
			return NULL;
		}
		bio_integrity_trim(ret, 0, bio_sectors(ret));
		bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
	}

	return ret;
}
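/*
 * Roughly: how many sectors, taken from the front of this bio, can be
 * submitted to the underlying device in one request without exceeding its
 * segment and size limits (or its merge_bvec_fn, if it has one)?
 */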
static unsigned bch_bio_max_sectors(struct bio *bio)
{
	unsigned ret = bio_sectors(bio);
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
				      queue_max_segments(q));

	if (bio->bi_rw & REQ_DISCARD)
		return min(ret, q->limits.max_discard_sectors);

	if (bio_segments(bio) > max_segments ||
	    q->merge_bvec_fn) {
		struct bio_vec *bv;
		int i, seg = 0;

		ret = 0;
		bio_for_each_segment(bv, bio, i) {
			struct bvec_merge_data bvm = {
				.bi_bdev = bio->bi_bdev,
				.bi_sector = bio->bi_sector,
				.bi_size = ret << 9,
				.bi_rw = bio->bi_rw,
			};

			if (seg == max_segments)
				break;
			if (q->merge_bvec_fn &&
			    q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
				break;

			seg++;
			ret += bv->bv_len >> 9;
		}
	}

	ret = min(ret, queue_max_sectors(q));
	WARN_ON(!ret);
	ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);

	return ret;
}
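/*
 * The splitting path: a bio that is too big for its target device is carved
 * up with bch_bio_split(), each fragment completes through
 * bch_bio_submit_split_endio(), and a closure collects the completions so
 * that the original bi_end_io is called exactly once, from
 * bch_bio_submit_split_done().
 */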
static void bch_bio_submit_split_done(struct closure *cl)
{
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	s->bio->bi_end_io = s->bi_end_io;
	s->bio->bi_private = s->bi_private;
	bio_endio(s->bio, 0);

	closure_debug_destroy(&s->cl);
	mempool_free(s, s->p->bio_split_hook);
}

static void bch_bio_submit_split_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	if (error)
		clear_bit(BIO_UPTODATE, &s->bio->bi_flags);

	bio_put(bio);
	closure_put(cl);
}
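/*
 * Submit @bio, splitting it as needed so that no piece exceeds what
 * bch_bio_max_sectors() says the target device will accept. The original
 * bi_end_io/bi_private are stashed in a bio_split_hook and restored once
 * every fragment has completed.
 */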
void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
	struct bio_split_hook *s;
	struct bio *n;

	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
		goto submit;

	if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
		goto submit;

	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
	closure_init(&s->cl, NULL);

	s->bio = bio;
	s->p = p;
	s->bi_end_io = bio->bi_end_io;
	s->bi_private = bio->bi_private;
	bio_get(bio);

	do {
		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
				  GFP_NOIO, s->p->bio_split);

		n->bi_end_io = bch_bio_submit_split_endio;
		n->bi_private = &s->cl;

		closure_get(&s->cl);
		bch_generic_make_request_hack(n);
	} while (n != bio);

	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
	bch_generic_make_request_hack(bio);
}
/* Bios with headers */
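/*
 * A bbio wraps a bio together with the bkey it is doing IO for and the time
 * it was submitted; the helpers below allocate these from a per-cache-set
 * mempool and point the bio at the device and offset named by the key.
 */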
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}
struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio);
	bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs = bucket_pages(c);
	bio->bi_io_vec = bio->bi_inline_vecs;

	return bio;
}
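/*
 * Point the bio at the cache device and offset named by the first pointer in
 * the bbio's key, record the submit time for latency/congestion accounting,
 * then submit it.
 */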
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_sector = PTR_OFFSET(&b->key, 0);
	bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
}
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}
/* IO errors */

void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */
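	/*
	 * That is: each decay step below multiplies the error count by
	 * 127/128, and one decay step happens per error_decay ("refresh")
	 * IOs counted, so errors halve after n steps where
	 * (127/128)^n = 1/2, i.e. n = log(1/2)/log(127/128) ~= 88.4.
	 */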
	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */
			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}
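	/*
	 * Errors are kept in fixed point: the low IO_ERROR_SHIFT bits are the
	 * fractional part consumed by the 127/128 decay above, so one error
	 * adds 1 << IO_ERROR_SHIFT and the integer count compared against
	 * error_limit is errors >> IO_ERROR_SHIFT.
	 */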
	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}
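/*
 * On completion of a bbio, account the IO's latency against the cache set's
 * congestion estimate (reads and writes have separate thresholds) and then
 * feed the result into the per-device error accounting above.
 */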
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = bio->bi_rw & REQ_WRITE
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_endio(bio, error);
	closure_put(cl);
}