/******************************************************************************
 * arch/xen/drivers/blkif/backend/main.c
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  arch/xen/drivers/blkif/frontend
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <xen/balloon.h>
#include <asm/hypervisor.h>
#include "common.h"
/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
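/*
 * For example, "modprobe blkback reqs=128" doubles the number of requests
 * that can be in flight; the cost is the pre-allocation, in blkif_init(),
 * of BLKIF_MAX_SEGMENTS_PER_REQUEST mapping pages per request.
 */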
/* Run-time switchable: /sys/module/blkback/parameters/ */
static int log_stats = 0;
static int debug_lvl = 0;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);
/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
typedef struct {
	blkif_t       *blkif;
	u64            id;
	int            nr_pages;
	atomic_t       pendcnt;
	unsigned short operation;
	int            status;
	struct list_head free_list;
} pending_req_t;

static pending_req_t *pending_reqs;
static struct list_head pending_free;
static DEFINE_SPINLOCK(pending_free_lock);
static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
#define BLKBACK_INVALID_HANDLE (~0)

static struct page **pending_pages;
static grant_handle_t *pending_grant_handles;
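/*
 * Each pending_req owns a fixed window of BLKIF_MAX_SEGMENTS_PER_REQUEST
 * pages in pending_pages[] and one slot per segment in
 * pending_grant_handles[]; vaddr_pagenr() below turns a (request, segment)
 * pair into that flat index.
 */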
static inline int vaddr_pagenr(pending_req_t *req, int seg)
{
	return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

static inline unsigned long vaddr(pending_req_t *req, int seg)
{
	unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
static int do_block_io_op(blkif_t *blkif);
static void dispatch_rw_block_io(blkif_t *blkif,
				 blkif_request_t *req,
				 pending_req_t *pending_req);
static void make_response(blkif_t *blkif, u64 id,
			  unsigned short op, int st);
/******************************************************************
 * misc small helper functions
 */

static pending_req_t* alloc_req(void)
{
	pending_req_t *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pending_free_lock, flags);
	if (!list_empty(&pending_free)) {
		req = list_entry(pending_free.next, pending_req_t, free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&pending_free_lock, flags);
	return req;
}
static void free_req(pending_req_t *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&pending_free_lock, flags);
	was_empty = list_empty(&pending_free);
	list_add(&req->free_list, &pending_free);
	spin_unlock_irqrestore(&pending_free_lock, flags);
	if (was_empty)
		wake_up(&pending_free_wq);
}
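/*
 * At most one request queue is 'plugged' at any time.  plug_queue() takes a
 * reference on the target device's queue; unplug_queue() runs the queue's
 * unplug function and drops that reference, so requests gathered in one pass
 * over the ring are batched before the device sees them.
 */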
static void unplug_queue(blkif_t *blkif)
{
	if (blkif->plug == NULL)
		return;
	if (blkif->plug->unplug_fn)
		blkif->plug->unplug_fn(blkif->plug);
	blk_put_queue(blkif->plug);
	blkif->plug = NULL;
}

static void plug_queue(blkif_t *blkif, struct block_device *bdev)
{
	request_queue_t *q = bdev_get_queue(bdev);

	if (q == blkif->plug)
		return;
	unplug_queue(blkif);
	blk_get_queue(q);
	blkif->plug = q;
}
static void fast_flush_area(pending_req_t *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
}
/******************************************************************
 * SCHEDULER FUNCTIONS
 */
static void print_stats(blkif_t *blkif)
{
	printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
	       current->comm, blkif->st_oo_req,
	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
}
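/*
 * One kernel thread per connected interface.  It sleeps until the frontend
 * signals work (blkif->waiting_reqs) and a pending_req is available, drains
 * the ring via do_block_io_op(), then unplugs the queue so the batch is
 * issued to the device.
 */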
int blkif_schedule(void *arg)
{
	blkif_t *blkif = arg;

	blkif_get(blkif);

	if (debug_lvl)
		printk(KERN_DEBUG "%s: started\n", current->comm);

	while (!kthread_should_stop()) {
		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			pending_free_wq,
			!list_empty(&pending_free) || kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;
		unplug_queue(blkif);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);
	if (debug_lvl)
		printk(KERN_DEBUG "%s: exiting\n", current->comm);

	blkif->xenblkd = NULL;
	blkif_put(blkif);

	return 0;
}
/******************************************************************
 * COMPLETION CALLBACK -- Called as bh->b_end_io()
 */
static void __end_block_io_op(pending_req_t *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
	    (error == -EOPNOTSUPP)) {
		DPRINTK("blkback: write barrier op failed, not supported\n");
		blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		DPRINTK("Buffer not up-to-date at end of operation, "
			"error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		fast_flush_area(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		blkif_put(pending_req->blkif);
		free_req(pending_req);
	}
}
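/*
 * With the three-argument bi_end_io prototype used here, a bio may complete
 * in pieces; returning 1 while bi_size is still non-zero defers the real
 * completion work until the whole bio has finished.
 */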
static int end_block_io_op(struct bio *bio, unsigned int done, int error)
{
	if (bio->bi_size != 0)
		return 1;
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
	return 0;
}
/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */
static void blkif_notify_work(blkif_t *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}
/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */
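/*
 * A frontend may use the native ring layout or the 32-bit/64-bit x86 ABI
 * layouts; each request is first copied into a private, protocol-neutral
 * blkif_request_t so that all sanity checks and dispatch operate on a copy
 * the guest can no longer modify.
 */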
static int do_block_io_op(blkif_t *blkif)
{
	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
	blkif_request_t req;
	pending_req_t *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		if (kthread_should_stop()) {
			free_req(pending_req); /* return unused request to the pool */
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
			blkif->st_rd_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		case BLKIF_OP_WRITE_BARRIER:
			blkif->st_br_req++;
			/* fall through */
		case BLKIF_OP_WRITE:
			blkif->st_wr_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		default:
			/* A good sign something is wrong: sleep for a while to
			 * avoid excessive CPU consumption by a bad guest. */
			msleep(1);
			DPRINTK("error: unknown block io operation [%d]\n",
				req.operation);
			make_response(blkif, req.id, req.operation,
				      BLKIF_RSP_ERROR);
			free_req(pending_req);
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}
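/*
 * Dispatch one read/write/barrier request: map the granted frames into the
 * per-request page window, translate the virtual-device extent via
 * vbd_translate(), then build and submit as few bios as bio_add_page()
 * allows.  Any failure unwinds the grant mappings and queues an error
 * response.
 */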
static void dispatch_rw_block_io(blkif_t *blkif,
				 blkif_request_t *req,
				 pending_req_t *pending_req)
{
	extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct phys_req preq;
	struct {
		unsigned long buf; unsigned int nsec;
	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	int ret, i;
	int operation;

	switch (req->operation) {
	case BLKIF_OP_READ:
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		operation = WRITE;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		operation = WRITE_BARRIER;
		break;
	default:
		operation = 0; /* make gcc happy */
		BUG();
	}

	/* Check that number of segments is sane. */
	nseg = req->nr_segments;
	if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		DPRINTK("Bad number of segments in request (%d)\n", nseg);
		goto fail_response;
	}

	preq.dev           = req->handle;
	preq.sector_number = req->sector_number;
	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->id;
	pending_req->operation = req->operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		seg[i].nsec = req->seg[i].last_sect -
			req->seg[i].first_sect + 1;

		if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->seg[i].last_sect < req->seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;

		flags = GNTMAP_host_map;
		/* Disk writes only read the guest's buffer: map it read-only. */
		if (operation != READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->seg[i].gref, blkif->domid);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			DPRINTK("invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		set_phys_to_machine(__pa(vaddr(
			pending_req, i)) >> PAGE_SHIFT,
			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
		seg[i].buf = map[i].dev_bus_addr |
			(req->seg[i].first_sect << 9);
	}

	if (ret)
		goto fail_flush;

	if (vbd_translate(&preq, blkif, operation) != 0) {
		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
			operation == READ ? "read" : "write",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_flush;
	}

	plug_queue(blkif, preq.bdev);
	atomic_set(&pending_req->pendcnt, 1);
	blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
			DPRINTK("Misaligned I/O request from domain %d",
				blkif->domid);
			goto fail_put_bio;
		}

		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     virt_to_page(vaddr(pending_req, i)),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {
			if (bio) {
				atomic_inc(&pending_req->pendcnt);
				submit_bio(operation, bio);
			}

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	if (!bio) {
		/* A barrier request may carry no data segments. */
		BUG_ON(operation != WRITE_BARRIER);
		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
		bio->bi_sector  = -1;
	}

	submit_bio(operation, bio);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation == WRITE || operation == WRITE_BARRIER)
		blkif->st_wr_sect += preq.nr_sects;

	return;

 fail_flush:
	fast_flush_area(pending_req);
 fail_response:
	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return;

 fail_put_bio:
	__end_block_io_op(pending_req, -EINVAL);
	if (bio)
		bio_put(bio);
	unplug_queue(blkif);
	msleep(1); /* back off a bit */
	return;
}
/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */
static void make_response(blkif_t *blkif, u64 id,
			  unsigned short op, int st)
{
	blkif_response_t resp;
	unsigned long flags;
	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
	int more_to_do = 0;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
		/*
		 * Tail check for pending requests. Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

	if (more_to_do)
		blkif_notify_work(blkif);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}
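/*
 * Rough sizing, assuming the usual BLKIF_MAX_SEGMENTS_PER_REQUEST of 11:
 * the default blkif_reqs of 64 pre-allocates 64 * 11 = 704 mapping pages
 * (~2.75 MiB with 4 KiB pages) plus the pending_req and grant-handle arrays.
 */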
static int __init blkif_init(void)
{
	int i, mmap_pages;

	if (!is_running_on_xen())
		return -ENODEV;

	mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
					blkif_reqs, GFP_KERNEL);
	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
					mmap_pages, GFP_KERNEL);
	pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);

	if (!pending_reqs || !pending_grant_handles || !pending_pages)
		goto out_of_memory;

	for (i = 0; i < mmap_pages; i++)
		pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;

	blkif_interface_init();

	/* Zero the whole request array, not just one pointer's worth. */
	memset(pending_reqs, 0, sizeof(pending_reqs[0]) * blkif_reqs);
	INIT_LIST_HEAD(&pending_free);

	for (i = 0; i < blkif_reqs; i++)
		list_add_tail(&pending_reqs[i].free_list, &pending_free);

	blkif_xenbus_init();

	return 0;

 out_of_memory:
	kfree(pending_reqs);
	kfree(pending_grant_handles);
	free_empty_pages_and_pagevec(pending_pages, mmap_pages);
	printk("%s: out of memory\n", __FUNCTION__);
	return -ENOMEM;
}
module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");