block: introduce blk_init_flush and its pair
authorMing Lei <ming.lei@canonical.com>
Thu, 25 Sep 2014 15:23:40 +0000 (23:23 +0800)
committerJens Axboe <axboe@fb.com>
Thu, 25 Sep 2014 21:22:35 +0000 (15:22 -0600)
These two temporary functions are introduced for holding flush
initialization and de-initialization, so that we can
introduce 'flush queue' more easily in the following patch. And
once 'flush queue' and its allocation/free functions are ready,
they will be removed for the sake of code readability.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-core.c
block/blk-flush.c
block/blk-mq.c
block/blk-mq.h
block/blk-sysfs.c
block/blk.h

index 6946a4275e6ff2e689de8b60dff18e7999fb3f80..0a9d17269957cf4004243ea2723a4926fbd626af 100644 (file)
@@ -705,8 +705,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
        if (!q)
                return NULL;
 
-       q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
-       if (!q->flush_rq)
+       if (blk_init_flush(q))
                return NULL;
 
        if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
@@ -742,7 +741,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
        return q;
 
 fail:
-       kfree(q->flush_rq);
+       blk_exit_flush(q);
        return NULL;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
index 55028a7079270afe6e1958d2bfd3635c23252755..c72ab32fd8eb8ed95f184850bdeda21572930331 100644 (file)
@@ -472,7 +472,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-int blk_mq_init_flush(struct request_queue *q)
+static int blk_mq_init_flush(struct request_queue *q)
 {
        struct blk_mq_tag_set *set = q->tag_set;
 
@@ -485,3 +485,20 @@ int blk_mq_init_flush(struct request_queue *q)
                return -ENOMEM;
        return 0;
 }
+
+int blk_init_flush(struct request_queue *q)
+{
+       if (q->mq_ops)
+               return blk_mq_init_flush(q);
+
+       q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+       if (!q->flush_rq)
+               return -ENOMEM;
+
+       return 0;
+}
+
+void blk_exit_flush(struct request_queue *q)
+{
+       kfree(q->flush_rq);
+}
index 78bcf8bfb22a71072de9625b60628efc087c516e..2758cdf2de941d1786045cbfca9158f8496ba8fc 100644 (file)
@@ -1859,7 +1859,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
        blk_mq_add_queue_tag_set(set, q);
 
-       if (blk_mq_init_flush(q))
+       if (blk_init_flush(q))
                goto err_hw_queues;
 
        blk_mq_map_swqueue(q);
index ecac69c0893779adc1a4c82e8b27bc32b7cf9f5e..d567d5283ffa788640e15163167530f874fc1a80 100644 (file)
@@ -27,7 +27,6 @@ struct blk_mq_ctx {
 
 void __blk_mq_complete_request(struct request *rq);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
-int blk_mq_init_flush(struct request_queue *q);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,
index 17f5c84ce7bfb588a5a34bc244a7ac38067b4391..949075952119f83e23600e02b7942ba74e8b857c 100644 (file)
@@ -517,11 +517,11 @@ static void blk_release_queue(struct kobject *kobj)
        if (q->queue_tags)
                __blk_queue_free_tags(q);
 
+       blk_exit_flush(q);
+
        if (q->mq_ops)
                blk_mq_free_queue(q);
 
-       kfree(q->flush_rq);
-
        blk_trace_shutdown(q);
 
        bdi_destroy(&q->backing_dev_info);
index e515a285d4c9640bd269555ad463ca887014bd11..c6fa3d4c6a897f70b74acd5dba2723eeca38d4e5 100644 (file)
@@ -22,6 +22,9 @@ static inline void __blk_get_queue(struct request_queue *q)
        kobject_get(&q->kobj);
 }
 
+int blk_init_flush(struct request_queue *q);
+void blk_exit_flush(struct request_queue *q);
+
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
                gfp_t gfp_mask);
 void blk_exit_rl(struct request_list *rl);