From: Paolo Bonzini
Date: Wed, 20 Mar 2013 05:14:27 +0000 (+1030)
Subject: virtio-blk: reorganize virtblk_add_req
X-Git-Tag: firefly_0821_release~3680^2~600^2~41
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=5ee21a52c05b5670ceeaa502c15cf306e379f714;p=firefly-linux-kernel-4.4.55.git

virtio-blk: reorganize virtblk_add_req

Right now, both virtblk_add_req and virtblk_add_buf_wait call
virtqueue_add_buf.  To prepare for the next patches, abstract the call
to virtqueue_add_buf into a new function __virtblk_add_req, and include
the waiting logic directly in virtblk_add_req.

Signed-off-by: Paolo Bonzini
Reviewed-by: Asias He
Signed-off-by: Rusty Russell
---

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 922bcb97e23a..b271650032fa 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -100,50 +100,39 @@ static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
 	return vbr;
 }
 
-static void virtblk_add_buf_wait(struct virtio_blk *vblk,
-				 struct virtblk_req *vbr,
-				 unsigned long out,
-				 unsigned long in)
+static inline int __virtblk_add_req(struct virtqueue *vq,
+				    struct virtblk_req *vbr,
+				    unsigned long out,
+				    unsigned long in)
 {
+	return virtqueue_add_buf(vq, vbr->sg, out, in, vbr, GFP_ATOMIC);
+}
+
+static void virtblk_add_req(struct virtblk_req *vbr,
+			    unsigned int out, unsigned int in)
+{
+	struct virtio_blk *vblk = vbr->vblk;
 	DEFINE_WAIT(wait);
+	int ret;
 
-	for (;;) {
+	spin_lock_irq(vblk->disk->queue->queue_lock);
+	while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr,
+						 out, in)) < 0)) {
 		prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
 					  TASK_UNINTERRUPTIBLE);
 
+		spin_unlock_irq(vblk->disk->queue->queue_lock);
+		io_schedule();
 		spin_lock_irq(vblk->disk->queue->queue_lock);
-		if (virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
-				      GFP_ATOMIC) < 0) {
-			spin_unlock_irq(vblk->disk->queue->queue_lock);
-			io_schedule();
-		} else {
-			virtqueue_kick(vblk->vq);
-			spin_unlock_irq(vblk->disk->queue->queue_lock);
-			break;
-		}
 
+		finish_wait(&vblk->queue_wait, &wait);
 	}
 
-	finish_wait(&vblk->queue_wait, &wait);
-}
-
-static inline void virtblk_add_req(struct virtblk_req *vbr,
-				   unsigned int out, unsigned int in)
-{
-	struct virtio_blk *vblk = vbr->vblk;
-
-	spin_lock_irq(vblk->disk->queue->queue_lock);
-	if (unlikely(virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
-				       GFP_ATOMIC) < 0)) {
-		spin_unlock_irq(vblk->disk->queue->queue_lock);
-		virtblk_add_buf_wait(vblk, vbr, out, in);
-		return;
-	}
 	virtqueue_kick(vblk->vq);
 	spin_unlock_irq(vblk->disk->queue->queue_lock);
 }
 
-static int virtblk_bio_send_flush(struct virtblk_req *vbr)
+static void virtblk_bio_send_flush(struct virtblk_req *vbr)
 {
 	unsigned int out = 0, in = 0;
 
@@ -155,11 +144,9 @@ static int virtblk_bio_send_flush(struct virtblk_req *vbr)
 	sg_set_buf(&vbr->sg[out + in++], &vbr->status, sizeof(vbr->status));
 
 	virtblk_add_req(vbr, out, in);
-
-	return 0;
 }
 
-static int virtblk_bio_send_data(struct virtblk_req *vbr)
+static void virtblk_bio_send_data(struct virtblk_req *vbr)
 {
 	struct virtio_blk *vblk = vbr->vblk;
 	unsigned int num, out = 0, in = 0;
@@ -188,8 +175,6 @@ static int virtblk_bio_send_data(struct virtblk_req *vbr)
 	}
 
 	virtblk_add_req(vbr, out, in);
-
-	return 0;
 }
 
 static void virtblk_bio_send_data_work(struct work_struct *work)
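
Editorial note: for readers unfamiliar with the kernel wait-queue idiom, below is a
minimal userspace sketch of the same "try to add, sleep while full, retry under the
lock" pattern that virtblk_add_req implements after this patch.  It substitutes a
pthread mutex and condition variable for queue_lock, prepare_to_wait_exclusive() and
io_schedule(); the ring type and the ring_add()/ring_complete() helpers are
hypothetical stand-ins for illustration only, not virtio or block-layer APIs.

/*
 * Userspace sketch of the virtblk_add_req waiting pattern (assumptions noted
 * above): ring_add() fails when the ring is full, and the caller sleeps on a
 * condition variable until a completion frees a slot, then retries.
 */
#include <pthread.h>
#include <stdio.h>

#define RING_SIZE 4

struct ring {
	int slots[RING_SIZE];
	int used;
	pthread_mutex_t lock;
	pthread_cond_t space;	/* signalled when a slot is freed */
};

/* Try to enqueue; return -1 if the ring is full (like virtqueue_add_buf). */
static int ring_add(struct ring *r, int req)
{
	if (r->used == RING_SIZE)
		return -1;
	r->slots[r->used++] = req;
	return 0;
}

/* Add a request, sleeping while the ring is full (cf. virtblk_add_req). */
static void ring_add_wait(struct ring *r, int req)
{
	pthread_mutex_lock(&r->lock);
	while (ring_add(r, req) < 0)
		pthread_cond_wait(&r->space, &r->lock);	/* sleep, then retry */
	pthread_mutex_unlock(&r->lock);
}

/* Complete one request and wake a waiter (cf. the completion/irq path). */
static void ring_complete(struct ring *r)
{
	pthread_mutex_lock(&r->lock);
	if (r->used > 0)
		r->used--;
	pthread_cond_signal(&r->space);
	pthread_mutex_unlock(&r->lock);
}

int main(void)
{
	struct ring r = {
		.used = 0,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.space = PTHREAD_COND_INITIALIZER,
	};

	for (int i = 0; i < RING_SIZE; i++)
		ring_add_wait(&r, i);	/* fills the ring without blocking */
	ring_complete(&r);		/* frees one slot */
	ring_add_wait(&r, RING_SIZE);	/* now succeeds immediately */
	printf("ring holds %d requests\n", r.used);
	return 0;
}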