xen/blkback: Union the blkif_request request specific fields
drivers/xen/blkback/blkback.c (firefly-linux-kernel-4.4.55.git)
1 /******************************************************************************
2  * arch/xen/drivers/blkif/backend/main.c
3  *
4  * Back-end of the driver for virtual block devices. This portion of the
5  * driver exports a 'unified' block-device interface that can be accessed
6  * by any operating system that implements a compatible front end. A
7  * reference front-end implementation can be found in:
8  *  arch/xen/drivers/blkif/frontend
9  *
10  * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
11  * Copyright (c) 2005, Christopher Clark
12  *
13  * This program is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU General Public License version 2
15  * as published by the Free Software Foundation; or, when distributed
16  * separately from the Linux kernel or incorporated into other
17  * software packages, subject to the following license:
18  *
19  * Permission is hereby granted, free of charge, to any person obtaining a copy
20  * of this source file (the "Software"), to deal in the Software without
21  * restriction, including without limitation the rights to use, copy, modify,
22  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
23  * and to permit persons to whom the Software is furnished to do so, subject to
24  * the following conditions:
25  *
26  * The above copyright notice and this permission notice shall be included in
27  * all copies or substantial portions of the Software.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
32  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
34  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
35  * IN THE SOFTWARE.
36  */
37
38 #include <linux/spinlock.h>
39 #include <linux/kthread.h>
40 #include <linux/list.h>
41 #include <linux/delay.h>
42 #include <linux/freezer.h>
43
44 #include <xen/balloon.h>
45 #include <xen/events.h>
46 #include <xen/page.h>
47 #include <asm/xen/hypervisor.h>
48 #include <asm/xen/hypercall.h>
49 #include "common.h"
50
51 /*
52  * These are rather arbitrary. They are fairly large because adjacent requests
53  * pulled from a communication ring are quite likely to end up being part of
54  * the same scatter/gather request at the disc.
55  *
56  * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
57  *
58  * This will increase the chances of being able to write whole tracks.
59  * 64 should be enough to keep us competitive with Linux.
60  */
61 static int blkif_reqs = 64;
62 module_param_named(reqs, blkif_reqs, int, 0);
63 MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
64
65 /* Run-time switchable: /sys/module/blkback/parameters/ */
66 static unsigned int log_stats;
67 static unsigned int debug_lvl;
68 module_param(log_stats, uint, 0644);
69 module_param(debug_lvl, uint, 0644);
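/*
 * For example (assuming the module is loaded as 'blkback', per the sysfs
 * path above), statistics logging can be enabled at run time with:
 *   echo 1 > /sys/module/blkback/parameters/log_stats
 */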
70
71 /*
72  * Each outstanding request that we've passed to the lower device layers has a
73  * 'pending_req' allocated to it. Each bio that completes decrements
74  * the pendcnt towards zero. When it hits zero, the specified domain has a
75  * response queued for it, with the saved 'id' passed back.
76  */
77 typedef struct {
78         blkif_t       *blkif;
79         u64            id;
80         int            nr_pages;
81         atomic_t       pendcnt;
82         unsigned short operation;
83         int            status;
84         struct list_head free_list;
85 } pending_req_t;
86
87 #define BLKBACK_INVALID_HANDLE (~0)
88
89 struct xen_blkbk {
90         pending_req_t   *pending_reqs;
91         struct list_head        pending_free;
92         spinlock_t              pending_free_lock;
93         wait_queue_head_t       pending_free_wq;
94         struct page             **pending_pages;
95         grant_handle_t          *pending_grant_handles;
96 };
97
98 static struct xen_blkbk *blkbk;
99
100 static inline int vaddr_pagenr(pending_req_t *req, int seg)
101 {
102         return (req - blkbk->pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
103 }
104
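/*
 * pending_page() is meant to be used as blkbk->pending_page(req, seg): the
 * macro expands to an index into the pending_pages array, yielding the
 * pre-allocated page that backs segment 'seg' of request 'req'.
 */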
105 #define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
106
107 static inline unsigned long vaddr(pending_req_t *req, int seg)
108 {
109         unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
110         return (unsigned long)pfn_to_kaddr(pfn);
111 }
112
113 #define pending_handle(_req, _seg) \
114         (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
115
116
117 static int do_block_io_op(blkif_t *blkif);
118 static void dispatch_rw_block_io(blkif_t *blkif,
119                                  struct blkif_request *req,
120                                  pending_req_t *pending_req);
121 static void make_response(blkif_t *blkif, u64 id,
122                           unsigned short op, int st);
123
124 /******************************************************************
125  * misc small helpers
126  */
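/*
 * Take a pending_req off the free pool, or return NULL if the pool is
 * exhausted (the caller accounts this as an 'out of descriptors' event).
 */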
127 static pending_req_t* alloc_req(void)
128 {
129         pending_req_t *req = NULL;
130         unsigned long flags;
131
132         spin_lock_irqsave(&blkbk->pending_free_lock, flags);
133         if (!list_empty(&blkbk->pending_free)) {
134                 req = list_entry(blkbk->pending_free.next, pending_req_t, free_list);
135                 list_del(&req->free_list);
136         }
137         spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
138         return req;
139 }
140
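/*
 * Return a pending_req to the free pool, waking blkif_schedule() if the
 * pool was previously empty.
 */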
141 static void free_req(pending_req_t *req)
142 {
143         unsigned long flags;
144         int was_empty;
145
146         spin_lock_irqsave(&blkbk->pending_free_lock, flags);
147         was_empty = list_empty(&blkbk->pending_free);
148         list_add(&req->free_list, &blkbk->pending_free);
149         spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
150         if (was_empty)
151                 wake_up(&blkbk->pending_free_wq);
152 }
153
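/*
 * Per-queue plugging: plug_queue() takes a reference on the backing
 * device's request queue while a batch of requests is being built;
 * unplug_queue() kicks the queue via its unplug_fn and drops the reference.
 */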
154 static void unplug_queue(blkif_t *blkif)
155 {
156         if (blkif->plug == NULL)
157                 return;
158         if (blkif->plug->unplug_fn)
159                 blkif->plug->unplug_fn(blkif->plug);
160         blk_put_queue(blkif->plug);
161         blkif->plug = NULL;
162 }
163
164 static void plug_queue(blkif_t *blkif, struct block_device *bdev)
165 {
166         struct request_queue *q = bdev_get_queue(bdev);
167
168         if (q == blkif->plug)
169                 return;
170         unplug_queue(blkif);
171         blk_get_queue(q);
172         blkif->plug = q;
173 }
174
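/*
 * Unmap every grant-mapped segment page of a request and invalidate the
 * stored grant handles so they cannot be unmapped twice.
 */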
175 static void fast_flush_area(pending_req_t *req)
176 {
177         struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
178         unsigned int i, invcount = 0;
179         grant_handle_t handle;
180         int ret;
181
182         for (i = 0; i < req->nr_pages; i++) {
183                 handle = pending_handle(req, i);
184                 if (handle == BLKBACK_INVALID_HANDLE)
185                         continue;
186                 gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
187                                     GNTMAP_host_map, handle);
188                 pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
189                 invcount++;
190         }
191
192         ret = HYPERVISOR_grant_table_op(
193                 GNTTABOP_unmap_grant_ref, unmap, invcount);
194         BUG_ON(ret);
195 }
196
197 /******************************************************************
198  * SCHEDULER FUNCTIONS
199  */
200
201 static void print_stats(blkif_t *blkif)
202 {
203         printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d  |  br %4d\n",
204                current->comm, blkif->st_oo_req,
205                blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
206         blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
207         blkif->st_rd_req = 0;
208         blkif->st_wr_req = 0;
209         blkif->st_oo_req = 0;
210 }
211
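/*
 * Main loop of the per-device kernel thread: sleep until the frontend has
 * queued requests and a free pending_req is available, drain the ring via
 * do_block_io_op(), then unplug the backing queue.
 */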
212 int blkif_schedule(void *arg)
213 {
214         blkif_t *blkif = arg;
215         struct vbd *vbd = &blkif->vbd;
216
217         blkif_get(blkif);
218
219         if (debug_lvl)
220                 printk(KERN_DEBUG "%s: started\n", current->comm);
221
222         while (!kthread_should_stop()) {
223                 if (try_to_freeze())
224                         continue;
225                 if (unlikely(vbd->size != vbd_size(vbd)))
226                         vbd_resize(blkif);
227
228                 wait_event_interruptible(
229                         blkif->wq,
230                         blkif->waiting_reqs || kthread_should_stop());
231                 wait_event_interruptible(
232                         blkbk->pending_free_wq,
233                         !list_empty(&blkbk->pending_free) || kthread_should_stop());
234
235                 blkif->waiting_reqs = 0;
236                 smp_mb(); /* clear flag *before* checking for work */
237
238                 if (do_block_io_op(blkif))
239                         blkif->waiting_reqs = 1;
240                 unplug_queue(blkif);
241
242                 if (log_stats && time_after(jiffies, blkif->st_print))
243                         print_stats(blkif);
244         }
245
246         if (log_stats)
247                 print_stats(blkif);
248         if (debug_lvl)
249                 printk(KERN_DEBUG "%s: exiting\n", current->comm);
250
251         blkif->xenblkd = NULL;
252         blkif_put(blkif);
253
254         return 0;
255 }
256
257 /******************************************************************
258  * COMPLETION CALLBACK -- Called as bio->bi_end_io()
259  */
260
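/*
 * Common bio-completion work: record any error in the pending_req and, once
 * the last outstanding bio finishes, unmap the grants, queue the response
 * and release the request.
 */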
261 static void __end_block_io_op(pending_req_t *pending_req, int error)
262 {
263         /* An error fails the entire request. */
264         if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
265             (error == -EOPNOTSUPP)) {
266                 DPRINTK("blkback: write barrier op failed, not supported\n");
267                 blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
268                 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
269         } else if (error) {
270                 DPRINTK("Buffer not up-to-date at end of operation, "
271                         "error=%d\n", error);
272                 pending_req->status = BLKIF_RSP_ERROR;
273         }
274
275         if (atomic_dec_and_test(&pending_req->pendcnt)) {
276                 fast_flush_area(pending_req);
277                 make_response(pending_req->blkif, pending_req->id,
278                               pending_req->operation, pending_req->status);
279                 blkif_put(pending_req->blkif);
280                 free_req(pending_req);
281         }
282 }
283
284 static void end_block_io_op(struct bio *bio, int error)
285 {
286         __end_block_io_op(bio->bi_private, error);
287         bio_put(bio);
288 }
289
290
291 /******************************************************************************
292  * NOTIFICATION FROM GUEST OS.
293  */
294
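/*
 * Event-channel interrupt from the frontend: note that requests are waiting
 * and wake the blkif_schedule() thread.
 */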
295 static void blkif_notify_work(blkif_t *blkif)
296 {
297         blkif->waiting_reqs = 1;
298         wake_up(&blkif->wq);
299 }
300
301 irqreturn_t blkif_be_int(int irq, void *dev_id)
302 {
303         blkif_notify_work(dev_id);
304         return IRQ_HANDLED;
305 }
306
307
308
309 /******************************************************************
310  * DOWNWARD CALLS -- These interface with the block-device layer proper.
311  */
312
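/*
 * Drain the shared ring: copy each request into a private local copy,
 * advance req_cons, then validate and dispatch it. Returns non-zero if work
 * remains (ring not yet empty, thread stopping, or no free pending_reqs).
 */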
313 static int do_block_io_op(blkif_t *blkif)
314 {
315         union blkif_back_rings *blk_rings = &blkif->blk_rings;
316         struct blkif_request req;
317         pending_req_t *pending_req;
318         RING_IDX rc, rp;
319         int more_to_do = 0;
320
321         rc = blk_rings->common.req_cons;
322         rp = blk_rings->common.sring->req_prod;
323         rmb(); /* Ensure we see queued requests up to 'rp'. */
324
325         while (rc != rp) {
326
327                 if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
328                         break;
329
330                 if (kthread_should_stop()) {
331                         more_to_do = 1;
332                         break;
333                 }
334
335                 pending_req = alloc_req();
336                 if (!pending_req) {
337                         blkif->st_oo_req++;
338                         more_to_do = 1;
339                         break;
340                 }
341
342                 switch (blkif->blk_protocol) {
343                 case BLKIF_PROTOCOL_NATIVE:
344                         memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
345                         break;
346                 case BLKIF_PROTOCOL_X86_32:
347                         blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
348                         break;
349                 case BLKIF_PROTOCOL_X86_64:
350                         blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
351                         break;
352                 default:
353                         BUG();
354                 }
355                 blk_rings->common.req_cons = ++rc; /* before make_response() */
356
357                 /* Apply all sanity checks to /private copy/ of request. */
358                 barrier();
359
360                 switch (req.operation) {
361                 case BLKIF_OP_READ:
362                         blkif->st_rd_req++;
363                         dispatch_rw_block_io(blkif, &req, pending_req);
364                         break;
365                 case BLKIF_OP_WRITE_BARRIER:
366                         blkif->st_br_req++;
367                         /* fall through */
368                 case BLKIF_OP_WRITE:
369                         blkif->st_wr_req++;
370                         dispatch_rw_block_io(blkif, &req, pending_req);
371                         break;
372                 default:
373                         /* A good sign something is wrong: sleep for a while to
374                          * avoid excessive CPU consumption by a bad guest. */
375                         msleep(1);
376                         DPRINTK("error: unknown block io operation [%d]\n",
377                                 req.operation);
378                         make_response(blkif, req.id, req.operation,
379                                       BLKIF_RSP_ERROR);
380                         free_req(pending_req);
381                         break;
382                 }
383
384                 /* Yield point for this unbounded loop. */
385                 cond_resched();
386         }
387
388         return more_to_do;
389 }
390
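/*
 * Turn one ring request into block-layer I/O: grant-map the guest's segment
 * pages, check the sector range against the virtual block device via
 * vbd_translate(), then build and submit one or more bios whose completion
 * is tracked through pending_req->pendcnt.
 */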
391 static void dispatch_rw_block_io(blkif_t *blkif,
392                                  struct blkif_request *req,
393                                  pending_req_t *pending_req)
394 {
395         struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
396         struct phys_req preq;
397         struct {
398                 unsigned long buf; unsigned int nsec;
399         } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
400         unsigned int nseg;
401         struct bio *bio = NULL;
402         int ret, i;
403         int operation;
404
405         switch (req->operation) {
406         case BLKIF_OP_READ:
407                 operation = READ;
408                 break;
409         case BLKIF_OP_WRITE:
410                 operation = WRITE;
411                 break;
412         case BLKIF_OP_WRITE_BARRIER:
413                 operation = WRITE | REQ_FLUSH | REQ_FUA; /* keep the WRITE direction bit */
414                 break;
415         default:
416                 operation = 0; /* make gcc happy */
417                 BUG();
418         }
419
420         /* Check that number of segments is sane. */
421         nseg = req->nr_segments;
422         if (unlikely(nseg == 0 && operation != (WRITE | REQ_FLUSH | REQ_FUA)) ||
423             unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
424                 DPRINTK("Bad number of segments in request (%d)\n", nseg);
425                 goto fail_response;
426         }
427
428         preq.dev           = req->handle;
429         preq.sector_number = req->u.rw.sector_number;
430         preq.nr_sects      = 0;
431
432         pending_req->blkif     = blkif;
433         pending_req->id        = req->id;
434         pending_req->operation = req->operation;
435         pending_req->status    = BLKIF_RSP_OKAY;
436         pending_req->nr_pages  = nseg;
437
438         for (i = 0; i < nseg; i++) {
439                 uint32_t flags;
440
441                 seg[i].nsec = req->u.rw.seg[i].last_sect -
442                         req->u.rw.seg[i].first_sect + 1;
443
444                 if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
445                     (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
446                         goto fail_response;
447                 preq.nr_sects += seg[i].nsec;
448
449                 flags = GNTMAP_host_map;
450                 if (operation != READ)
451                         flags |= GNTMAP_readonly;
452                 gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
453                                   req->u.rw.seg[i].gref, blkif->domid);
454         }
455
456         ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
457         BUG_ON(ret);
458
459         for (i = 0; i < nseg; i++) {
460                 if (unlikely(map[i].status != 0)) {
461                         DPRINTK("invalid buffer -- could not remap it\n");
462                         map[i].handle = BLKBACK_INVALID_HANDLE;
463                         ret |= 1;
464                 }
465
466                 pending_handle(pending_req, i) = map[i].handle;
467
468                 if (ret)
469                         continue;
470
471                 set_phys_to_machine(
472                         page_to_pfn(blkbk->pending_page(pending_req, i)),
473                         FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
474                 seg[i].buf  = map[i].dev_bus_addr |
475                         (req->u.rw.seg[i].first_sect << 9);
476         }
477
478         if (ret)
479                 goto fail_flush;
480
481         if (vbd_translate(&preq, blkif, operation) != 0) {
482                 DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
483                         operation == READ ? "read" : "write",
484                         preq.sector_number,
485                         preq.sector_number + preq.nr_sects, preq.dev);
486                 goto fail_flush;
487         }
488
489         plug_queue(blkif, preq.bdev);
490         atomic_set(&pending_req->pendcnt, 1);
491         blkif_get(blkif);
492
493         for (i = 0; i < nseg; i++) {
494                 if (((int)preq.sector_number|(int)seg[i].nsec) &
495                     ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
496                         DPRINTK("Misaligned I/O request from domain %d",
497                                 blkif->domid);
498                         goto fail_put_bio;
499                 }
500
501                 while ((bio == NULL) ||
502                        (bio_add_page(bio,
503                                      blkbk->pending_page(pending_req, i),
504                                      seg[i].nsec << 9,
505                                      seg[i].buf & ~PAGE_MASK) == 0)) {
506                         if (bio) {
507                                 atomic_inc(&pending_req->pendcnt);
508                                 submit_bio(operation, bio);
509                         }
510
511                         bio = bio_alloc(GFP_KERNEL, nseg-i);
512                         if (unlikely(bio == NULL))
513                                 goto fail_put_bio;
514
515                         bio->bi_bdev    = preq.bdev;
516                         bio->bi_private = pending_req;
517                         bio->bi_end_io  = end_block_io_op;
518                         bio->bi_sector  = preq.sector_number;
519                 }
520
521                 preq.sector_number += seg[i].nsec;
522         }
523
524         if (!bio) {
525                 BUG_ON(operation != (WRITE | REQ_FLUSH | REQ_FUA));
526                 bio = bio_alloc(GFP_KERNEL, 0);
527                 if (unlikely(bio == NULL))
528                         goto fail_put_bio;
529
530                 bio->bi_bdev    = preq.bdev;
531                 bio->bi_private = pending_req;
532                 bio->bi_end_io  = end_block_io_op;
533                 bio->bi_sector  = -1;
534         }
535
536         submit_bio(operation, bio);
537
538         if (operation == READ)
539                 blkif->st_rd_sect += preq.nr_sects;
540         else if (operation == WRITE || operation == (WRITE | REQ_FLUSH | REQ_FUA))
541                 blkif->st_wr_sect += preq.nr_sects;
542
543         return;
544
545  fail_flush:
546         fast_flush_area(pending_req);
547  fail_response:
548         make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
549         free_req(pending_req);
550         msleep(1); /* back off a bit */
551         return;
552
553  fail_put_bio:
554         __end_block_io_op(pending_req, -EINVAL);
555         if (bio)
556                 bio_put(bio);
557         unplug_queue(blkif);
558         msleep(1); /* back off a bit */
559         return;
560 }
561
562
563
564 /******************************************************************
565  * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
566  */
567
568
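/*
 * Queue a response on the appropriate (native or compat) ring, push it to
 * the frontend and send an event-channel notification if needed. If
 * unconsumed requests remain, re-arm the worker thread.
 */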
569 static void make_response(blkif_t *blkif, u64 id,
570                           unsigned short op, int st)
571 {
572         struct blkif_response  resp;
573         unsigned long     flags;
574         union blkif_back_rings *blk_rings = &blkif->blk_rings;
575         int more_to_do = 0;
576         int notify;
577
578         resp.id        = id;
579         resp.operation = op;
580         resp.status    = st;
581
582         spin_lock_irqsave(&blkif->blk_ring_lock, flags);
583         /* Place on the response ring for the relevant domain. */
584         switch (blkif->blk_protocol) {
585         case BLKIF_PROTOCOL_NATIVE:
586                 memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
587                        &resp, sizeof(resp));
588                 break;
589         case BLKIF_PROTOCOL_X86_32:
590                 memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
591                        &resp, sizeof(resp));
592                 break;
593         case BLKIF_PROTOCOL_X86_64:
594                 memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
595                        &resp, sizeof(resp));
596                 break;
597         default:
598                 BUG();
599         }
600         blk_rings->common.rsp_prod_pvt++;
601         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
602         if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
603                 /*
604                  * Tail check for pending requests. Allows frontend to avoid
605                  * notifications if requests are already in flight (lower
606                  * overheads and promotes batching).
607                  */
608                 RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
609
610         } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
611                 more_to_do = 1;
612         }
613
614         spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
615
616         if (more_to_do)
617                 blkif_notify_work(blkif);
618         if (notify)
619                 notify_remote_via_irq(blkif->irq);
620 }
621
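/*
 * Module init: allocate the pending_req pool, the pages backing their
 * segments and the grant-handle array, then initialise the interface and
 * xenbus glue.
 */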
622 static int __init blkif_init(void)
623 {
624         int i, mmap_pages;
625         int rc = 0;
626
627         if (!xen_pv_domain())
628                 return -ENODEV;
629
630         blkbk = vmalloc(sizeof(struct xen_blkbk));
631         if (!blkbk) {
632                 printk(KERN_ALERT "%s: out of memory!\n", __func__);
633                 return -ENOMEM;
634         }
635
636         mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
637
638         blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
639                                         blkif_reqs, GFP_KERNEL);
640         blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
641                                         mmap_pages, GFP_KERNEL);
642         blkbk->pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);
643
644         if (!blkbk->pending_reqs || !blkbk->pending_grant_handles || !blkbk->pending_pages) {
645                 rc = -ENOMEM;
646                 goto out_of_memory;
647         }
648
649         for (i = 0; i < mmap_pages; i++)
650                 blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
651
652         rc = blkif_interface_init();
653         if (rc)
654                 goto failed_init;
655
656         memset(blkbk->pending_reqs, 0, sizeof(blkbk->pending_reqs[0]) * blkif_reqs);
657
658         INIT_LIST_HEAD(&blkbk->pending_free);
659         spin_lock_init(&blkbk->pending_free_lock);
660         init_waitqueue_head(&blkbk->pending_free_wq);
661
662         for (i = 0; i < blkif_reqs; i++)
663                 list_add_tail(&blkbk->pending_reqs[i].free_list, &blkbk->pending_free);
664
665         rc = blkif_xenbus_init();
666         if (rc)
667                 goto failed_init;
668
669         return 0;
670
671  out_of_memory:
672         printk(KERN_ERR "%s: out of memory\n", __func__);
673  failed_init:
674         kfree(blkbk->pending_reqs);
675         kfree(blkbk->pending_grant_handles);
676         free_empty_pages_and_pagevec(blkbk->pending_pages, mmap_pages);
677         vfree(blkbk);
678         blkbk = NULL;
679         return rc;
680 }
681
682 module_init(blkif_init);
683
684 MODULE_LICENSE("Dual BSD/GPL");