/******************************************************************************
 * arch/xen/drivers/blkif/backend/main.c
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  arch/xen/drivers/blkif/frontend
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
static unsigned int debug_lvl;
module_param(log_stats, uint, 0644);
module_param(debug_lvl, uint, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each completing bio decrements the pendcnt
 * towards zero. When it hits zero, the specified domain has a response
 * queued for it, with the saved 'id' passed back.
 */
typedef struct {
	blkif_t       *blkif;
	u64            id;
	int            nr_pages;
	atomic_t       pendcnt;
	unsigned short operation;
	int            status;
	struct list_head free_list;
} pending_req_t;

#define BLKBACK_INVALID_HANDLE (~0)

struct xen_blkbk {
	pending_req_t		*pending_reqs;
	/* List of all 'pending_req' available. */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;
	/* The list of all pages that are available. */
	struct page		**pending_pages;
	/* And the grant handles that are available. */
	grant_handle_t		*pending_grant_handles;
};

static struct xen_blkbk *blkbk;

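/*
 * Little helpers: each pending_req owns BLKIF_MAX_SEGMENTS_PER_REQUEST
 * slots in the flat pending_pages[] and pending_grant_handles[] arrays.
 * vaddr_pagenr() turns a (request, segment) pair into that flat index.
 */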
static inline int vaddr_pagenr(pending_req_t *req, int seg)
{
	return (req - blkbk->pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(pending_req_t *req, int seg)
{
	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])

static int do_block_io_op(blkif_t *blkif);
static void dispatch_rw_block_io(blkif_t *blkif,
				 struct blkif_request *req,
				 pending_req_t *pending_req);
static void make_response(blkif_t *blkif, u64 id,
			  unsigned short op, int st);

/******************************************************************
 * misc small helpers
 */
static pending_req_t *alloc_req(void)
{
	pending_req_t *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	if (!list_empty(&blkbk->pending_free)) {
		req = list_entry(blkbk->pending_free.next, pending_req_t,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	return req;
}

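/*
 * Return a pending_req to the free pool; wake blkif_schedule() if the pool
 * was empty, since it may be sleeping on pending_free_wq waiting for a slot.
 */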
static void free_req(pending_req_t *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	was_empty = list_empty(&blkbk->pending_free);
	list_add(&req->free_list, &blkbk->pending_free);
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkbk->pending_free_wq);
}

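/*
 * blkif->plug holds a reference to the request queue we most recently
 * plugged; unplug_queue() kicks it (if it has an unplug_fn) and drops
 * the reference.
 */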
static void unplug_queue(blkif_t *blkif)
{
	if (blkif->plug == NULL)
		return;
	if (blkif->plug->unplug_fn)
		blkif->plug->unplug_fn(blkif->plug);
	blk_put_queue(blkif->plug);
	blkif->plug = NULL;
}

static void plug_queue(blkif_t *blkif, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q == blkif->plug)
		return;
	unplug_queue(blkif);
	blk_get_queue(q);
	blkif->plug = q;
}

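/*
 * Unmap the grant references of a completed request and clear the
 * corresponding M2P overrides, returning the backend pages to a clean state.
 */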
static void fast_flush_area(pending_req_t *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
	/*
	 * Note, we use invcount, not nr_pages, so we can't index
	 * using vaddr(req, i).
	 */
	for (i = 0; i < invcount; i++) {
		ret = m2p_remove_override(
			virt_to_page(unmap[i].host_addr), false);
		if (ret) {
			printk(KERN_ALERT "Failed to remove M2P override for "
			       "%lx\n", (unsigned long)unmap[i].host_addr);
			continue;
		}
	}
}

/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static void print_stats(blkif_t *blkif)
{
	printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
	       current->comm, blkif->st_oo_req,
	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
}

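/*
 * Per-interface kernel thread. It sleeps until the frontend kicks us and a
 * pending_req is available, drains the ring via do_block_io_op(), unplugs
 * the backing queue, and periodically prints statistics when log_stats is
 * set.
 */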
int blkif_schedule(void *arg)
{
	blkif_t *blkif = arg;
	struct vbd *vbd = &blkif->vbd;

	blkif_get(blkif);

	if (debug_lvl)
		printk(KERN_DEBUG "%s: started\n", current->comm);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_size(vbd)))
			vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			blkbk->pending_free_wq,
			!list_empty(&blkbk->pending_free) ||
			kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;
		unplug_queue(blkif);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);
	if (debug_lvl)
		printk(KERN_DEBUG "%s: exiting\n", current->comm);

	blkif->xenblkd = NULL;
	blkif_put(blkif);

	return 0;
}

/******************************************************************
 * COMPLETION CALLBACK -- Called as bio->bi_end_io()
 */

static void __end_block_io_op(pending_req_t *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
	    (error == -EOPNOTSUPP)) {
		DPRINTK("blkback: write barrier op failed, not supported\n");
		blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		DPRINTK("Buffer not up-to-date at end of operation, "
			"error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		fast_flush_area(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		blkif_put(pending_req->blkif);
		free_req(pending_req);
	}
}

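/*
 * bio completion callback: recover the owning pending_req from bi_private,
 * fold the bio's status into it, and drop our reference to the bio.
 */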
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}

/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */

static void blkif_notify_work(blkif_t *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */

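/*
 * Consume requests from the shared ring: copy each one into a private
 * struct blkif_request (converting from the 32- or 64-bit frontend layout
 * if necessary), advance req_cons, and dispatch it. Returns nonzero if
 * there is more work to do.
 */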
static int do_block_io_op(blkif_t *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	pending_req_t *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
			blkif->st_rd_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		case BLKIF_OP_WRITE_BARRIER:
			blkif->st_br_req++;
			/* fall through */
		case BLKIF_OP_WRITE:
			blkif->st_wr_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		default:
			/* A good sign something is wrong: sleep for a while to
			 * avoid excessive CPU consumption by a bad guest. */
			msleep(1);
			DPRINTK("error: unknown block io operation [%d]\n",
				req.operation);
			make_response(blkif, req.id, req.operation,
				      BLKIF_RSP_ERROR);
			free_req(pending_req);
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}

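/*
 * Transmutation of a 'struct blkif_request' into one or more bios: map the
 * guest's grant references onto our preallocated pages, validate and
 * translate the virtual-device sector range with vbd_translate(), then
 * build and submit bios against the real backing device.
 */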
static void dispatch_rw_block_io(blkif_t *blkif,
				 struct blkif_request *req,
				 pending_req_t *pending_req)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct phys_req preq;
	struct {
		unsigned long buf; unsigned int nsec;
	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	int ret, i;
	int operation;

	switch (req->operation) {
	case BLKIF_OP_READ:
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		operation = WRITE;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		operation = REQ_FLUSH | REQ_FUA;
		break;
	default:
		operation = 0; /* make gcc happy */
		BUG();
	}

	/* Check that the number of segments is sane. */
	nseg = req->nr_segments;
	if (unlikely(nseg == 0 && operation != (REQ_FLUSH | REQ_FUA)) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		DPRINTK("Bad number of segments in request (%d)\n", nseg);
		goto fail_response;
	}

	preq.dev           = req->handle;
	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->id;
	pending_req->operation = req->operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	/* Fill out one grant-map operation per segment. */
	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;

		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;

		/* Only a READ needs to write into the guest's buffer. */
		flags = GNTMAP_host_map;
		if (operation != READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->u.rw.seg[i].gref, blkif->domid);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			DPRINTK("invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
			blkbk->pending_page(pending_req, i), false);
		if (ret) {
			printk(KERN_ALERT "Failed to install M2P override for"
			       " %lx (ret: %d)\n",
			       (unsigned long)map[i].dev_bus_addr, ret);
			continue;
		}

		seg[i].buf = map[i].dev_bus_addr |
			(req->u.rw.seg[i].first_sect << 9);
	}

	if (ret)
		goto fail_flush;

	if (vbd_translate(&preq, blkif, operation) != 0) {
		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
			operation == READ ? "read" : "write",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_flush;
	}

	plug_queue(blkif, preq.bdev);
	atomic_set(&pending_req->pendcnt, 1);
	blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number | (int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			DPRINTK("Misaligned I/O request from domain %d",
				blkif->domid);
			goto fail_put_bio;
		}

		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     blkbk->pending_page(pending_req, i),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {
			if (bio) {
				atomic_inc(&pending_req->pendcnt);
				submit_bio(operation, bio);
			}

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	if (!bio) {
		/* A barrier request carries no data: submit an empty bio. */
		BUG_ON(operation != (REQ_FLUSH | REQ_FUA));
		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
		bio->bi_sector  = -1;
	}

	submit_bio(operation, bio);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation == WRITE || operation == (REQ_FLUSH | REQ_FUA))
		blkif->st_wr_sect += preq.nr_sects;

	return;

 fail_flush:
	fast_flush_area(pending_req);
 fail_response:
	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return;

 fail_put_bio:
	__end_block_io_op(pending_req, -EINVAL);
	if (bio)
		bio_put(bio);
	unplug_queue(blkif);
	msleep(1); /* back off a bit */
	return;
}

/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */

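/*
 * Queue a response on the ring for the given request id, in whichever ABI
 * the frontend speaks, and notify it over the event channel if the ring
 * macros say a notification is needed.
 */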
static void make_response(blkif_t *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do = 0;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
		/*
		 * Tail check for pending requests. Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

	if (more_to_do)
		blkif_notify_work(blkif);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

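/*
 * Module initialisation: allocate the global pending_req pool plus one page
 * and one grant-handle slot per possible segment, then bring up the
 * interface and xenbus machinery.
 */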
static int __init blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_pv_domain())
		return -ENODEV;

	blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
	if (!blkbk) {
		printk(KERN_ALERT "%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
					       blkif_reqs, GFP_KERNEL);
	blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
					       mmap_pages, GFP_KERNEL);
	blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
					       mmap_pages, GFP_KERNEL);

	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
	    !blkbk->pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++) {
		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
		blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
		if (blkbk->pending_pages[i] == NULL) {
			rc = -ENOMEM;
			goto out_of_memory;
		}
	}
	rc = blkif_interface_init();
	if (rc)
		goto failed_init;

	memset(blkbk->pending_reqs, 0,
	       sizeof(blkbk->pending_reqs[0]) * blkif_reqs);

	INIT_LIST_HEAD(&blkbk->pending_free);
	spin_lock_init(&blkbk->pending_free_lock);
	init_waitqueue_head(&blkbk->pending_free_wq);

	for (i = 0; i < blkif_reqs; i++)
		list_add_tail(&blkbk->pending_reqs[i].free_list,
			      &blkbk->pending_free);

	rc = blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

 out_of_memory:
	printk(KERN_ERR "%s: out of memory\n", __func__);
 failed_init:
	kfree(blkbk->pending_reqs);
	kfree(blkbk->pending_grant_handles);
	for (i = 0; i < mmap_pages; i++) {
		if (blkbk->pending_pages && blkbk->pending_pages[i])
			__free_page(blkbk->pending_pages[i]);
	}
	kfree(blkbk->pending_pages);
	kfree(blkbk);
	blkbk = NULL;
	return rc;
}

module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");