/******************************************************************************
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"	/* driver-local declarations: blkif_st, vbd helpers, DPRINTK, ... */
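
/*
 * Frontend BLKIF_OP_WRITE_BARRIER requests are emulated below by submitting
 * the data as a write that carries both a preceding cache flush (REQ_FLUSH)
 * and forced-unit-access (REQ_FUA) semantics, which is roughly what the old
 * hard-barrier request type guaranteed.
 */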
#define WRITE_BARRIER	(REQ_WRITE | REQ_FLUSH | REQ_FUA)

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
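
/*
 * With the defaults this pre-allocates 64 requests and, below in blkif_init(),
 * 64 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) = 704 pages for grant mappings --
 * roughly 2.75 MB of address space with 4 KiB pages.
 */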

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
static unsigned int debug_lvl;
module_param(log_stats, uint, 0644);
module_param(debug_lvl, uint, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct blkif_st		*blkif;
	u64			id;
	int			nr_pages;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
};

#define BLKBACK_INVALID_HANDLE (~0)
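
/*
 * (~0) is used as a sentinel in pending_grant_handles[]: it marks a segment
 * slot that currently has no grant mapped, and fast_flush_area() skips such
 * slots when unmapping.
 */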

struct xen_blkbk {
	struct pending_req	*pending_reqs;
	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	/* And the wait queue to wait on when 'pending_free' is empty. */
	wait_queue_head_t	pending_free_wq;
	/* The list of all pages that are available. */
	struct page		**pending_pages;
	/* And the grant handles that are available. */
	grant_handle_t		*pending_grant_handles;
};

static struct xen_blkbk *blkbk;

/*
 * Little helpful macro to figure out the index and virtual address of the
 * pending_pages[..]. For each 'pending_req' we have up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
 * 10 and would index in the pending_pages[..].
 */
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
	return (req - blkbk->pending_reqs) *
		BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(struct pending_req *req, int seg)
{
	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
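
/*
 * Example of the indexing above: for the pending_req at index 2 in
 * blkbk->pending_reqs and segment 4, vaddr_pagenr() yields
 * 2 * BLKIF_MAX_SEGMENTS_PER_REQUEST + 4 = 26, so pending_page() and
 * pending_handle() refer to blkbk->pending_pages[26] and
 * blkbk->pending_grant_handles[26] respectively.
 */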

static int do_block_io_op(struct blkif_st *blkif);
static void dispatch_rw_block_io(struct blkif_st *blkif,
				 struct blkif_request *req,
				 struct pending_req *pending_req);
static void make_response(struct blkif_st *blkif, u64 id,
			  unsigned short op, int st);

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(void)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	if (!list_empty(&blkbk->pending_free)) {
		req = list_entry(blkbk->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free request.
 */
static void free_req(struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	was_empty = list_empty(&blkbk->pending_free);
	list_add(&req->free_list, &blkbk->pending_free);
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkbk->pending_free_wq);
}
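
/*
 * alloc_req()/free_req() implement a simple fixed-size pool: blkif_schedule()
 * below sleeps on pending_free_wq when the pool is exhausted, and the
 * wake_up() above lets it resume once a request completes and is returned.
 */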

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct blkif_st *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);	/* wait queue slept on in blkif_schedule() */
}
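
/*
 * Interrupt handler for the per-device event channel (registered elsewhere
 * in the driver). It only flags that work is pending and wakes the device's
 * kernel thread; the actual ring processing happens in blkif_schedule().
 */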
irqreturn_t blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct blkif_st *blkif)
{
	printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
	       current->comm, blkif->st_oo_req,
	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
}
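
/*
 * Per-device kernel thread: woken either by the frontend (via blkif_be_int())
 * or by a pending_req being freed, it drains the shared ring with
 * do_block_io_op() and, when 'log_stats' is set, prints the statistics above
 * at most once every ten seconds.
 */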
int blkif_schedule(void *arg)
{
	struct blkif_st *blkif = arg;
	struct vbd *vbd = &blkif->vbd;

	blkif_get(blkif);
	if (debug_lvl)
		printk(KERN_DEBUG "%s: started\n", current->comm);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_size(vbd)))
			vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			blkbk->pending_free_wq,
			!list_empty(&blkbk->pending_free) ||
			kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;
		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);
	if (debug_lvl)
		printk(KERN_DEBUG "%s: exiting\n", current->comm);

	blkif->xenblkd = NULL;
	blkif_put(blkif);
	return 0;
}

/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void fast_flush_area(struct pending_req *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
	/* Note, we use invcount, not req->nr_pages, so we can't index
	 * using vaddr(req, i).
	 */
	for (i = 0; i < invcount; i++) {
		ret = m2p_remove_override(
			virt_to_page(unmap[i].host_addr), false);
		if (ret) {
			printk(KERN_ALERT "Failed to remove M2P override for "
			       "%lx\n", (unsigned long)unmap[i].host_addr);
			continue;
		}
	}
}

/*
 * Completion callback on the bio's. Called as bio->bi_end_io().
 */
static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
	    (error == -EOPNOTSUPP)) {
		DPRINTK("blkback: write barrier op failed, not supported\n");
		blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		DPRINTK("Buffer not up-to-date at end of operation, "
			"error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/* If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		fast_flush_area(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		blkif_put(pending_req->blkif);
		free_req(pending_req);
	}
}

static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}

/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int do_block_io_op(struct blkif_st *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;
		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
			blkif->st_rd_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		case BLKIF_OP_WRITE_BARRIER:
			blkif->st_br_req++;
			/* fall through */
		case BLKIF_OP_WRITE:
			blkif->st_wr_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		default:
			/* A good sign something is wrong: sleep for a while to
			 * avoid excessive CPU consumption by a bad guest. */
			msleep(1);
			DPRINTK("error: unknown block io operation [%d]\n",
				req.operation);
			make_response(blkif, req.id, req.operation,
				      BLKIF_RSP_ERROR);
			free_req(pending_req);
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}
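
/*
 * The non-zero return value above feeds back into blkif_schedule(), which
 * simply sets blkif->waiting_reqs again so the ring is re-examined on the
 * next loop iteration instead of the thread going back to sleep.
 */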

/*
 * Transmute the 'struct blkif_request' to a proper 'struct bio'
 * and call 'submit_bio' to pass it to the underlying storage.
 */
static void dispatch_rw_block_io(struct blkif_st *blkif,
				 struct blkif_request *req,
				 struct pending_req *pending_req)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct phys_req preq;
	struct {
		unsigned long buf; unsigned int nsec;
	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int ret, i, nbio = 0;
	int operation;
	struct blk_plug plug;
	struct request_queue *q;

	switch (req->operation) {
	case BLKIF_OP_READ:
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		operation = WRITE;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		operation = WRITE_BARRIER;
		break;
	default:
		operation = 0; /* make gcc happy */
		BUG();
	}

	/* Check that the number of segments is sane. */
	nseg = req->nr_segments;
	if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		DPRINTK("Bad number of segments in request (%d)\n", nseg);
		goto fail_response;
	}

	preq.dev           = req->handle;
	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->id;
	pending_req->operation = req->operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	/*
	 * Fill out preq.nr_sects with the proper number of sectors, and set up
	 * map[..] with a grant-mapping operation for each page, using the
	 * corresponding grant reference supplied in the request.
	 */
	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;
		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;

		flags = GNTMAP_host_map;
		if (operation != READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->u.rw.seg[i].gref, blkif->domid);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			DPRINTK("invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
			blkbk->pending_page(pending_req, i), false);
		if (ret) {
			printk(KERN_ALERT "Failed to install M2P override for"
			       " %lx (ret: %d)\n", (unsigned long)
			       map[i].dev_bus_addr, ret);
			/* We could switch over to GNTTABOP_copy */
			continue;
		}

		seg[i].buf = map[i].dev_bus_addr |
			(req->u.rw.seg[i].first_sect << 9);
	}

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * fast_flush_area.
	 */
	if (ret)
		goto fail_flush;

	if (vbd_translate(&preq, blkif, operation) != 0) {
		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
			operation == READ ? "read" : "write",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_flush;
	}

	/* This corresponding blkif_put is done in __end_block_io_op */
	blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			DPRINTK("Misaligned I/O request from domain %d",
				blkif->domid);
			goto fail_put_bio;
		}

		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     blkbk->pending_page(pending_req, i),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {
			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;
			/* Only count the bio once it has been allocated, so the
			 * cleanup loop at fail_put_bio never sees a NULL entry. */
			biolist[nbio++] = bio;

			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a barrier. */
	if (!bio) {
		BUG_ON(operation != WRITE_BARRIER);
		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;
		biolist[nbio++] = bio;

		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
		bio->bi_sector  = -1;
	}

	/*
	 * We set the count to 'nbio' up front so that the last submit_bio does
	 * not have to call atomic_inc.
	 */
	atomic_set(&pending_req->pendcnt, nbio);
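
	/*
	 * From here on completion is driven by end_block_io_op(): every bio
	 * completion decrements pendcnt, and the final one unmaps the grants
	 * and queues the response for the frontend.
	 */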

	/* Get a reference count for the disk queue and start sending I/O */
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	blk_finish_plug(&plug);
	/* Let the I/Os go.. */

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation == WRITE || operation == WRITE_BARRIER)
		blkif->st_wr_sect += preq.nr_sects;

	return;

 fail_flush:
	fast_flush_area(pending_req);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct blkif_st *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do = 0;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
		/*
		 * Tail check for pending requests. Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

	if (more_to_do)
		blkif_notify_work(blkif);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}
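
/*
 * Note the two directions of notification above: 'notify' (computed by
 * RING_PUSH_RESPONSES_AND_CHECK_NOTIFY) kicks the frontend via its event
 * channel, while 'more_to_do' re-arms our own kthread in case the frontend
 * has already queued further requests.
 */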

static int __init blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_pv_domain())
		return -ENODEV;

	blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
	if (!blkbk) {
		printk(KERN_ALERT "%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
					blkif_reqs, GFP_KERNEL);
	blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
					mmap_pages, GFP_KERNEL);
	blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
					mmap_pages, GFP_KERNEL);

	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
	    !blkbk->pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++) {
		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
		blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
		if (blkbk->pending_pages[i] == NULL) {
			rc = -ENOMEM;
			goto out_of_memory;
		}
	}
	rc = blkif_interface_init();
	if (rc)
		goto failed_init;

	memset(blkbk->pending_reqs, 0,
	       blkif_reqs * sizeof(blkbk->pending_reqs[0]));

	INIT_LIST_HEAD(&blkbk->pending_free);
	spin_lock_init(&blkbk->pending_free_lock);
	init_waitqueue_head(&blkbk->pending_free_wq);

	for (i = 0; i < blkif_reqs; i++)
		list_add_tail(&blkbk->pending_reqs[i].free_list,
			      &blkbk->pending_free);

	rc = blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

 out_of_memory:
	printk(KERN_ERR "%s: out of memory\n", __func__);
 failed_init:
	kfree(blkbk->pending_reqs);
	kfree(blkbk->pending_grant_handles);
	if (blkbk->pending_pages) {
		for (i = 0; i < mmap_pages; i++) {
			if (blkbk->pending_pages[i])
				__free_page(blkbk->pending_pages[i]);
		}
	}
	kfree(blkbk->pending_pages);
	kfree(blkbk);
	blkbk = NULL;
	return rc;
}

module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");