xen/blkback: Clean up: move the code around a bit.
/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

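/*
 * A blkif barrier write is implemented as a normal write that additionally
 * asks the block layer for a preceding cache flush (REQ_FLUSH) and forced
 * unit access (REQ_FUA).
 */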
#define WRITE_BARRIER   (REQ_WRITE | REQ_FLUSH | REQ_FUA)

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
static unsigned int debug_lvl;
module_param(log_stats, uint, 0644);
module_param(debug_lvl, uint, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
        struct blkif_st       *blkif;
        u64            id;
        int            nr_pages;
        atomic_t       pendcnt;
        unsigned short operation;
        int            status;
        struct list_head free_list;
};

#define BLKBACK_INVALID_HANDLE (~0)

struct xen_blkbk {
        struct pending_req      *pending_reqs;
        /* List of all 'pending_req' available */
        struct list_head        pending_free;
        /* And its spinlock. */
        spinlock_t              pending_free_lock;
        wait_queue_head_t       pending_free_wq;
        /* The list of all pages that are available. */
        struct page             **pending_pages;
        /* And the grant handles that are available. */
        grant_handle_t          *pending_grant_handles;
};

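/*
 * There is a single, module-wide pool of pending requests, pages, and
 * grant handles; all blkback instances allocate from it.
 */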
static struct xen_blkbk *blkbk;

/*
 * Little helpful macro to figure out the index and virtual address of the
 * pending_pages[..]. For each 'pending_req' we have up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
 * 10 and would index in the pending_pages[..].
 */
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
        return (req - blkbk->pending_reqs) *
                BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
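/*
 * pending_page() is written to be expanded against the 'blkbk' pointer:
 * blkbk->pending_page(req, seg) reads as
 * blkbk->pending_pages[vaddr_pagenr(req, seg)].
 */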

static inline unsigned long vaddr(struct pending_req *req, int seg)
{
        unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
        return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
        (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])

static int do_block_io_op(struct blkif_st *blkif);
static void dispatch_rw_block_io(struct blkif_st *blkif,
                                 struct blkif_request *req,
                                 struct pending_req *pending_req);
static void make_response(struct blkif_st *blkif, u64 id,
                          unsigned short op, int st);

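/*
 * Request lifecycle: do_block_io_op() pulls a 'struct blkif_request' off
 * the shared ring, dispatch_rw_block_io() maps the granted pages and
 * submits the bios, end_block_io_op() runs once per completed bio, and
 * when the last one finishes make_response() queues the reply on the ring.
 */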
/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(void)
{
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&blkbk->pending_free_lock, flags);
        if (!list_empty(&blkbk->pending_free)) {
                req = list_entry(blkbk->pending_free.next, struct pending_req,
                                 free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
        return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free 'pending_req'.
 */
static void free_req(struct pending_req *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&blkbk->pending_free_lock, flags);
        was_empty = list_empty(&blkbk->pending_free);
        list_add(&req->free_list, &blkbk->pending_free);
        spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
        if (was_empty)
                wake_up(&blkbk->pending_free_wq);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct blkif_st *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

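/*
 * Interrupt handler for the event channel: wake the xenblkd thread
 * (blkif_schedule) so it consumes the newly queued requests.
 */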
irqreturn_t blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct blkif_st *blkif)
{
        printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d  |  br %4d\n",
               current->comm, blkif->st_oo_req,
               blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
        blkif->st_br_req = 0;
}

int blkif_schedule(void *arg)
{
        struct blkif_st *blkif = arg;
        struct vbd *vbd = &blkif->vbd;

        blkif_get(blkif);

        if (debug_lvl)
                printk(KERN_DEBUG "%s: started\n", current->comm);

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_size(vbd)))
                        vbd_resize(blkif);

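                /*
                 * Wait until the frontend has queued work for us and we
                 * have a free 'pending_req' to service it with.
                 */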
                wait_event_interruptible(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop());
                wait_event_interruptible(
                        blkbk->pending_free_wq,
                        !list_empty(&blkbk->pending_free) ||
                        kthread_should_stop());

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                if (do_block_io_op(blkif))
                        blkif->waiting_reqs = 1;

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        if (log_stats)
                print_stats(blkif);
        if (debug_lvl)
                printk(KERN_DEBUG "%s: exiting\n", current->comm);

        blkif->xenblkd = NULL;
        blkif_put(blkif);

        return 0;
}

/*
 * Unmap the grant references, and also remove the M2P overrides
 * used in the 'pending_req'.
 */
static void fast_flush_area(struct pending_req *req)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
        grant_handle_t handle;
        int ret;

        for (i = 0; i < req->nr_pages; i++) {
                handle = pending_handle(req, i);
                if (handle == BLKBACK_INVALID_HANDLE)
                        continue;
                gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
                                    GNTMAP_host_map, handle);
                pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        ret = HYPERVISOR_grant_table_op(
                GNTTABOP_unmap_grant_ref, unmap, invcount);
        BUG_ON(ret);
        /* Note, we use invcount, not nr_pages, so we can't index
         * using vaddr(req, i).
         */
        for (i = 0; i < invcount; i++) {
                ret = m2p_remove_override(
                        virt_to_page(unmap[i].host_addr), false);
                if (ret) {
                        printk(KERN_ALERT "Failed to remove M2P override for "
                                "%lx\n", (unsigned long)unmap[i].host_addr);
                        continue;
                }
        }
}

/*
 * Completion callback on the bios: invoked via the bio's bi_end_io handler.
 */
static void __end_block_io_op(struct pending_req *pending_req, int error)
{
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
            (error == -EOPNOTSUPP)) {
                DPRINTK("blkback: write barrier op failed, not supported\n");
                blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                DPRINTK("Buffer not up-to-date at end of operation, "
                        "error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        /* If all of the bios have completed, it is time to unmap
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                fast_flush_area(pending_req);
                make_response(pending_req->blkif, pending_req->id,
                              pending_req->operation, pending_req->status);
                blkif_put(pending_req->blkif);
                free_req(pending_req);
        }
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
        __end_block_io_op(bio->bi_private, error);
        bio_put(bio);
}

/*
 * Copy a 'struct blkif_request' from the ring buffer (which has the sectors
 * we want, the number of them, grant references, etc), and transmute it to
 * the block API to hand it over to the proper block disk.
 */
static int do_block_io_op(struct blkif_st *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        struct blkif_request req;
        struct pending_req *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req();
                if (NULL == pending_req) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                        blkif->st_rd_req++;
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
                case BLKIF_OP_WRITE_BARRIER:
                        blkif->st_br_req++;
                        /* fall through */
                case BLKIF_OP_WRITE:
                        blkif->st_wr_req++;
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
                default:
                        /* A good sign something is wrong: sleep for a while to
                         * avoid excessive CPU consumption by a bad guest. */
                        msleep(1);
                        DPRINTK("error: unknown block io operation [%d]\n",
                                req.operation);
                        make_response(blkif, req.id, req.operation,
                                      BLKIF_RSP_ERROR);
                        free_req(pending_req);
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }

        return more_to_do;
}

/*
 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
 * and a call to 'submit_bio' to pass it to the underlying storage.
 */
static void dispatch_rw_block_io(struct blkif_st *blkif,
                                 struct blkif_request *req,
                                 struct pending_req *pending_req)
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct phys_req preq;
        struct {
                unsigned long buf; unsigned int nsec;
        } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
        struct bio *bio = NULL;
        struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        int ret, i, nbio = 0;
        int operation;
        struct blk_plug plug;
        struct request_queue *q;

        switch (req->operation) {
        case BLKIF_OP_READ:
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
                operation = WRITE;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                operation = WRITE_BARRIER;
                break;
        default:
                operation = 0; /* make gcc happy */
                BUG();
        }

        /* Check that the number of segments is sane. */
        nseg = req->nr_segments;
        if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
            unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
                DPRINTK("Bad number of segments in request (%d)\n", nseg);
                goto fail_response;
        }

        preq.dev           = req->handle;
        preq.sector_number = req->u.rw.sector_number;
        preq.nr_sects      = 0;

        pending_req->blkif     = blkif;
        pending_req->id        = req->id;
        pending_req->operation = req->operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;

        /* Fill out preq.nr_sects with the proper amount of sectors, and set
         * up map[..] with the PFN of the page in our domain with the
         * corresponding grant reference for each page.
         */
        for (i = 0; i < nseg; i++) {
                uint32_t flags;

                seg[i].nsec = req->u.rw.seg[i].last_sect -
                        req->u.rw.seg[i].first_sect + 1;
                if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;

                flags = GNTMAP_host_map;
                if (operation != READ)
                        flags |= GNTMAP_readonly;
                gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                  req->u.rw.seg[i].gref, blkif->domid);
        }

        ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
        BUG_ON(ret);

        /* Now swizzle the MFN in our domain with the MFN from the other
         * domain so that when we access vaddr(pending_req,i) it has the
         * contents of the page from the other domain.
         */
        for (i = 0; i < nseg; i++) {
                if (unlikely(map[i].status != 0)) {
                        DPRINTK("invalid buffer -- could not remap it\n");
                        map[i].handle = BLKBACK_INVALID_HANDLE;
                        ret |= 1;
                }

                pending_handle(pending_req, i) = map[i].handle;

                if (ret)
                        continue;

                ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
                        blkbk->pending_page(pending_req, i), false);
                if (ret) {
                        printk(KERN_ALERT "Failed to install M2P override for"
                                " %lx (ret: %d)\n", (unsigned long)
                                map[i].dev_bus_addr, ret);
                        /* We could switch over to GNTTABOP_copy */
                        continue;
                }

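                /* The grant's bus address plus the byte offset of the first
                 * sector locates this segment's data within the page. */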
                seg[i].buf  = map[i].dev_bus_addr |
                        (req->u.rw.seg[i].first_sect << 9);
        }

        /* If we have failed at this point, we need to undo the M2P override,
         * set gnttab_set_unmap_op on all of the grant references and perform
         * the hypercall to unmap the grants - that is all done in
         * fast_flush_area.
         */
        if (ret)
                goto fail_flush;

        if (vbd_translate(&preq, blkif, operation) != 0) {
                DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
                        operation == READ ? "read" : "write",
                        preq.sector_number,
                        preq.sector_number + preq.nr_sects, preq.dev);
                goto fail_flush;
        }

        /* The corresponding blkif_put is done in __end_block_io_op. */
        blkif_get(blkif);

        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        DPRINTK("Misaligned I/O request from domain %d",
                                blkif->domid);
                        goto fail_put_bio;
                }

                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     blkbk->pending_page(pending_req, i),
                                     seg[i].nsec << 9,
                                     seg[i].buf & ~PAGE_MASK) == 0)) {

                        bio = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;
                        /* Only add the bio to biolist[] once the allocation
                         * has succeeded, so fail_put_bio can put every
                         * entry. */
                        biolist[nbio++] = bio;

                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_sector  = preq.sector_number;
                }

                preq.sector_number += seg[i].nsec;
        }

        /* This will be hit if the operation was a barrier. */
        if (!bio) {
                BUG_ON(operation != WRITE_BARRIER);
                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;
                biolist[nbio++] = bio;

                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
                bio->bi_sector  = -1;
        }

        /* Set pendcnt to the number of bios up front, so that the last
         * submit_bio does not have to call atomic_inc.
         */
        atomic_set(&pending_req->pendcnt, nbio);

        /* Get a reference count for the disk queue and start sending I/O */
        q = bdev_get_queue(preq.bdev);
        blk_get_queue(q);
        blk_start_plug(&plug);

        for (i = 0; i < nbio; i++)
                submit_bio(operation, biolist[i]);

        blk_finish_plug(&plug);
        /* Let the I/Os go.. */
        blk_put_queue(q);

        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
        else if (operation == WRITE || operation == WRITE_BARRIER)
                blkif->st_wr_sect += preq.nr_sects;

        return;

 fail_flush:
        fast_flush_area(pending_req);
 fail_response:
        /* Haven't submitted any bios yet. */
        make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
        free_req(pending_req);
        msleep(1); /* back off a bit */
        return;

 fail_put_bio:
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
        /* pendcnt has not been set on this path; make it 1 so the single
         * __end_block_io_op below performs the final cleanup. */
        atomic_set(&pending_req->pendcnt, 1);
        __end_block_io_op(pending_req, -EINVAL);
        msleep(1); /* back off a bit */
        return;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct blkif_st *blkif, u64 id,
                          unsigned short op, int st)
{
        struct blkif_response  resp;
        unsigned long     flags;
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int more_to_do = 0;
        int notify;

        resp.id        = id;
        resp.operation = op;
        resp.status    = st;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
        /* Place on the response ring for the relevant domain. */
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_32:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_64:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        default:
                BUG();
        }
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
                /*
                 * Tail check for pending requests. Allows frontend to avoid
                 * notifications if requests are already in flight (lower
                 * overheads and promotes batching).
                 */
                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);

        } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
                more_to_do = 1;
        }

        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

        if (more_to_do)
                blkif_notify_work(blkif);
        if (notify)
                notify_remote_via_irq(blkif->irq);
}

static int __init blkif_init(void)
{
        int i, mmap_pages;
        int rc = 0;

        if (!xen_pv_domain())
                return -ENODEV;

        blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
        if (!blkbk) {
                printk(KERN_ALERT "%s: out of memory!\n", __func__);
                return -ENOMEM;
        }

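        /* One mapped page for each segment of every in-flight request. */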
        mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

        blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
                                        blkif_reqs, GFP_KERNEL);
        blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
                                        mmap_pages, GFP_KERNEL);
        blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
                                        mmap_pages, GFP_KERNEL);

        if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
            !blkbk->pending_pages) {
                rc = -ENOMEM;
                goto out_of_memory;
        }

        for (i = 0; i < mmap_pages; i++) {
                blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
                blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
                if (blkbk->pending_pages[i] == NULL) {
                        rc = -ENOMEM;
                        goto out_of_memory;
                }
        }
        rc = blkif_interface_init();
        if (rc)
                goto failed_init;

        memset(blkbk->pending_reqs, 0,
               blkif_reqs * sizeof(blkbk->pending_reqs[0]));

        INIT_LIST_HEAD(&blkbk->pending_free);
        spin_lock_init(&blkbk->pending_free_lock);
        init_waitqueue_head(&blkbk->pending_free_wq);

        for (i = 0; i < blkif_reqs; i++)
                list_add_tail(&blkbk->pending_reqs[i].free_list,
                              &blkbk->pending_free);

        rc = blkif_xenbus_init();
        if (rc)
                goto failed_init;

        return 0;

 out_of_memory:
        printk(KERN_ERR "%s: out of memory\n", __func__);
 failed_init:
        kfree(blkbk->pending_reqs);
        kfree(blkbk->pending_grant_handles);
        if (blkbk->pending_pages) {
                for (i = 0; i < mmap_pages; i++) {
                        if (blkbk->pending_pages[i])
                                __free_page(blkbk->pending_pages[i]);
                }
                kfree(blkbk->pending_pages);
        }
        kfree(blkbk);
        blkbk = NULL;
        return rc;
}

module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");