X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=drivers%2Fmd%2Fdm-cache-target.c;h=dd90d1236f4a46be1cf28d7ef8ebc5d64235adf7;hb=d86a2fe4dc97cbbad04f5db941d9a1bb82e22b5f;hp=1fe93cfea7d309a659d79fe2b953b5f2dbe7b466;hpb=04782ca20a8ebb5acf0c35756a38964ae5ea7321;p=firefly-linux-kernel-4.4.55.git

diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1fe93cfea7d3..dd90d1236f4a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -424,7 +424,6 @@ static void free_migration(struct dm_cache_migration *mg)
 		wake_up(&cache->migration_wait);
 
 	mempool_free(mg, cache->migration_pool);
-	wake_worker(cache);
 }
 
 static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
@@ -919,14 +918,14 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
 	wake_worker(cache);
 }
 
-static void writethrough_endio(struct bio *bio, int err)
+static void writethrough_endio(struct bio *bio)
 {
 	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
 
 	dm_unhook_bio(&pb->hook_info, bio);
 
-	if (err) {
-		bio_endio(bio, err);
+	if (bio->bi_error) {
+		bio_endio(bio);
 		return;
 	}
 
@@ -1064,14 +1063,6 @@ static void dec_io_migrations(struct cache *cache)
 	atomic_dec(&cache->nr_io_migrations);
 }
 
-static void __cell_release(struct cache *cache, struct dm_bio_prison_cell *cell,
-			   bool holder, struct bio_list *bios)
-{
-	(holder ? dm_cell_release : dm_cell_release_no_holder)
-		(cache->prison, cell, bios);
-	free_prison_cell(cache, cell);
-}
-
 static bool discard_or_flush(struct bio *bio)
 {
 	return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD);
@@ -1079,14 +1070,13 @@ static bool discard_or_flush(struct bio *bio)
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
 {
-	if (discard_or_flush(cell->holder))
+	if (discard_or_flush(cell->holder)) {
 		/*
-		 * We have to handle these bios
-		 * individually.
+		 * We have to handle these bios individually.
 		 */
-		__cell_release(cache, cell, true, &cache->deferred_bios);
-
-	else
+		dm_cell_release(cache->prison, cell, &cache->deferred_bios);
+		free_prison_cell(cache, cell);
+	} else
 		list_add_tail(&cell->user_list, &cache->deferred_cells);
 }
 
@@ -1113,7 +1103,7 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, boo
 static void cell_error_with_code(struct cache *cache, struct dm_bio_prison_cell *cell, int err)
 {
 	dm_cell_error(cache->prison, cell, err);
-	dm_bio_prison_free_cell(cache->prison, cell);
+	free_prison_cell(cache, cell);
 }
 
 static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1123,8 +1113,11 @@ static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell)
 
 static void free_io_migration(struct dm_cache_migration *mg)
 {
-	dec_io_migrations(mg->cache);
+	struct cache *cache = mg->cache;
+
+	dec_io_migrations(cache);
 	free_migration(mg);
+	wake_worker(cache);
 }
 
 static void migration_failure(struct dm_cache_migration *mg)
@@ -1231,7 +1224,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
 			 * The block was promoted via an overwrite, so it's dirty.
 			 */
 			set_dirty(cache, mg->new_oblock, mg->cblock);
-			bio_endio(mg->new_ocell->holder, 0);
+			bio_endio(mg->new_ocell->holder);
 			cell_defer(cache, mg->new_ocell, false);
 		}
 		free_io_migration(mg);
@@ -1284,7 +1277,7 @@ static void issue_copy(struct dm_cache_migration *mg)
 	}
 }
 
-static void overwrite_endio(struct bio *bio, int err)
+static void overwrite_endio(struct bio *bio)
 {
 	struct dm_cache_migration *mg = bio->bi_private;
 	struct cache *cache = mg->cache;
@@ -1294,7 +1287,7 @@ static void overwrite_endio(struct bio *bio, int err)
 
 	dm_unhook_bio(&pb->hook_info, bio);
 
-	if (err)
+	if (bio->bi_error)
 		mg->err = true;
 
 	mg->requeue_holder = false;
@@ -1351,16 +1344,18 @@ static void issue_discard(struct dm_cache_migration *mg)
 {
 	dm_dblock_t b, e;
 	struct bio *bio = mg->new_ocell->holder;
+	struct cache *cache = mg->cache;
 
-	calc_discard_block_range(mg->cache, bio, &b, &e);
+	calc_discard_block_range(cache, bio, &b, &e);
 	while (b != e) {
-		set_discard(mg->cache, b);
+		set_discard(cache, b);
 		b = to_dblock(from_dblock(b) + 1);
 	}
 
-	bio_endio(bio, 0);
-	cell_defer(mg->cache, mg->new_ocell, false);
+	bio_endio(bio);
+	cell_defer(cache, mg->new_ocell, false);
 	free_migration(mg);
+	wake_worker(cache);
 }
 
 static void issue_copy_or_discard(struct dm_cache_migration *mg)
@@ -1631,7 +1626,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs,
 
 	calc_discard_block_range(cache, bio, &b, &e);
 	if (b == e) {
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		return;
 	}
 
@@ -1729,6 +1724,8 @@ static void remap_cell_to_origin_clear_discard(struct cache *cache,
 		remap_to_origin(cache, bio);
 		issue(cache, bio);
 	}
+
+	free_prison_cell(cache, cell);
 }
 
 static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell,
@@ -1763,6 +1760,8 @@ static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_
 		remap_to_cache(cache, bio, cblock);
 		issue(cache, bio);
 	}
+
+	free_prison_cell(cache, cell);
 }
 
 /*----------------------------------------------------------------*/
@@ -2217,8 +2216,10 @@ static void requeue_deferred_bios(struct cache *cache)
 	bio_list_merge(&bios, &cache->deferred_bios);
 	bio_list_init(&cache->deferred_bios);
 
-	while ((bio = bio_list_pop(&bios)))
-		bio_endio(bio, DM_ENDIO_REQUEUE);
+	while ((bio = bio_list_pop(&bios))) {
+		bio->bi_error = DM_ENDIO_REQUEUE;
+		bio_endio(bio);
+	}
 }
 
 static int more_work(struct cache *cache)
@@ -3123,7 +3124,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 			 * This is a duplicate writethrough io that is no
 			 * longer needed because the block has been demoted.
 			 */
-			bio_endio(bio, 0);
+			bio_endio(bio);
 			// FIXME: remap everything as a miss
 			cell_defer(cache, cell, false);
 			r = DM_MAPIO_SUBMITTED;
@@ -3778,26 +3779,6 @@ static int cache_iterate_devices(struct dm_target *ti,
 	return r;
 }
 
-/*
- * We assume I/O is going to the origin (which is the volume
- * more likely to have restrictions e.g. by being striped).
- * (Looking up the exact location of the data would be expensive
- * and could always be out of date by the time the bio is submitted.)
- */
-static int cache_bvec_merge(struct dm_target *ti,
-			    struct bvec_merge_data *bvm,
-			    struct bio_vec *biovec, int max_size)
-{
-	struct cache *cache = ti->private;
-	struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = cache->origin_dev->bdev;
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 {
 	/*
@@ -3841,7 +3822,6 @@ static struct target_type cache_target = {
 	.status = cache_status,
 	.message = cache_message,
 	.iterate_devices = cache_iterate_devices,
-	.merge = cache_bvec_merge,
 	.io_hints = cache_io_hints,
 };
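
Most of the mechanical churn in this patch comes from the Linux 4.3 block-layer change that dropped the error argument from bio_endio() and added an error field to struct bio: completion status now travels in bio->bi_error, so completers set it before calling bio_endio() and endio callbacks read it from the bio. A minimal sketch of that pattern, in kernel-style C; the names example_endio() and example_fail_bio() are illustrative only and not part of this file:

#include <linux/bio.h>
#include <linux/printk.h>

/* Hypothetical completion callback using the one-argument bio_endio() form. */
static void example_endio(struct bio *bio)
{
	if (bio->bi_error)	/* status is read from the bio itself */
		pr_err("example: io failed: %d\n", bio->bi_error);

	bio_put(bio);
}

/* Hypothetical helper: fail a bio the way requeue_deferred_bios() now does. */
static void example_fail_bio(struct bio *bio, int error)
{
	bio->bi_error = error;	/* stash the status in the bio ... */
	bio_endio(bio);		/* ... then complete it with no error argument */
}

The deletion of cache_bvec_merge() and the .merge hook reflects the related removal of merge_bvec_fn from the block layer in the same kernel release, after which bios may be split arbitrarily and per-target merge callbacks are no longer needed.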