xen/blkback: Clean up and move the code around a bit
diff --git a/drivers/xen/blkback/blkback.c b/drivers/xen/blkback/blkback.c
index 8d988f4513aaa3aa674202d2eada9475d3ef2b91..f282463d7b5ce0f5d63938a5c02eccff4528a6d3 100644
--- a/drivers/xen/blkback/blkback.c
+++ b/drivers/xen/blkback/blkback.c
@@ -1,11 +1,10 @@
 /******************************************************************************
- * arch/xen/drivers/blkif/backend/main.c
  *
  * Back-end of the driver for virtual block devices. This portion of the
  * driver exports a 'unified' block-device interface that can be accessed
  * by any operating system that implements a compatible front end. A
  * reference front-end implementation can be found in:
- *  arch/xen/drivers/blkif/frontend
+ *  drivers/block/xen-blkfront.c
  *
  * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
  * Copyright (c) 2005, Christopher Clark
 #include <linux/list.h>
 #include <linux/delay.h>
 #include <linux/freezer.h>
-#include <xen/balloon.h>
+
 #include <xen/events.h>
 #include <xen/page.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 #include "common.h"
 
+#define WRITE_BARRIER  (REQ_WRITE | REQ_FLUSH | REQ_FUA)
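+/* Flags used when submitting bios for a BLKIF_OP_WRITE_BARRIER request. */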
+
 /*
  * These are rather arbitrary. They are fairly large because adjacent requests
  * pulled from a communication ring are quite likely to end up being part of
@@ -62,8 +63,8 @@ module_param_named(reqs, blkif_reqs, int, 0);
 MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
 
 /* Run-time switchable: /sys/module/blkback/parameters/ */
-static unsigned int log_stats = 0;
-static unsigned int debug_lvl = 0;
+static unsigned int log_stats;
+static unsigned int debug_lvl;
 module_param(log_stats, int, 0644);
 module_param(debug_lvl, int, 0644);
 
@@ -73,126 +74,118 @@ module_param(debug_lvl, int, 0644);
  * the pendcnt towards zero. When it hits zero, the specified domain has a
  * response queued for it, with the saved 'id' passed back.
  */
-typedef struct {
-       blkif_t       *blkif;
+struct pending_req {
+       struct blkif_st       *blkif;
        u64            id;
        int            nr_pages;
        atomic_t       pendcnt;
        unsigned short operation;
        int            status;
        struct list_head free_list;
-} pending_req_t;
-
-static pending_req_t *pending_reqs;
-static struct list_head pending_free;
-static DEFINE_SPINLOCK(pending_free_lock);
-static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
+};
 
 #define BLKBACK_INVALID_HANDLE (~0)
 
-static struct page **pending_pages;
-static grant_handle_t *pending_grant_handles;
+struct xen_blkbk {
+       struct pending_req      *pending_reqs;
+       /* List of all 'pending_req' available */
+       struct list_head        pending_free;
+       /* And its spinlock. */
+       spinlock_t              pending_free_lock;
+       wait_queue_head_t       pending_free_wq;
+       /* The list of all pages that are available. */
+       struct page             **pending_pages;
+       /* And the grant handles that are available. */
+       grant_handle_t          *pending_grant_handles;
+};
+
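+/* Module-wide pool state, allocated once in blkif_init(). */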
+static struct xen_blkbk *blkbk;
 
-static inline int vaddr_pagenr(pending_req_t *req, int seg)
+/*
+ * Little helpers to figure out the index and virtual address of a given
+ * pending_pages[..] entry. For each 'pending_req' we have up to
+ * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages; 'seg' runs from 0 through 10
+ * and indexes into pending_pages[..].
+ */
+static inline int vaddr_pagenr(struct pending_req *req, int seg)
 {
-       return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
+       return (req - blkbk->pending_reqs) *
+               BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
 }
 
-static inline unsigned long vaddr(pending_req_t *req, int seg)
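+/* Expands to a struct member; used as blkbk->pending_page(req, seg). */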
+#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
+
+static inline unsigned long vaddr(struct pending_req *req, int seg)
 {
-       unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
+       unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
        return (unsigned long)pfn_to_kaddr(pfn);
 }
 
 #define pending_handle(_req, _seg) \
-       (pending_grant_handles[vaddr_pagenr(_req, _seg)])
+       (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
 
 
-static int do_block_io_op(blkif_t *blkif);
-static void dispatch_rw_block_io(blkif_t *blkif,
+static int do_block_io_op(struct blkif_st *blkif);
+static void dispatch_rw_block_io(struct blkif_st *blkif,
                                 struct blkif_request *req,
-                                pending_req_t *pending_req);
-static void make_response(blkif_t *blkif, u64 id,
+                                struct pending_req *pending_req);
+static void make_response(struct blkif_st *blkif, u64 id,
                          unsigned short op, int st);
 
-/******************************************************************
- * misc small helpers
+/*
+ * Retrieve a free pending_req structure from the 'pending_free' list.
  */
-static pending_req_t* alloc_req(void)
+static struct pending_req *alloc_req(void)
 {
-       pending_req_t *req = NULL;
+       struct pending_req *req = NULL;
        unsigned long flags;
 
-       spin_lock_irqsave(&pending_free_lock, flags);
-       if (!list_empty(&pending_free)) {
-               req = list_entry(pending_free.next, pending_req_t, free_list);
+       spin_lock_irqsave(&blkbk->pending_free_lock, flags);
+       if (!list_empty(&blkbk->pending_free)) {
+               req = list_entry(blkbk->pending_free.next, struct pending_req,
+                                free_list);
                list_del(&req->free_list);
        }
-       spin_unlock_irqrestore(&pending_free_lock, flags);
+       spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
        return req;
 }
 
-static void free_req(pending_req_t *req)
+/*
+ * Return the 'pending_req' structure back to the free pool. We also
+ * wake up the thread if it was waiting for a free pending_req.
+ */
+static void free_req(struct pending_req *req)
 {
        unsigned long flags;
        int was_empty;
 
-       spin_lock_irqsave(&pending_free_lock, flags);
-       was_empty = list_empty(&pending_free);
-       list_add(&req->free_list, &pending_free);
-       spin_unlock_irqrestore(&pending_free_lock, flags);
+       spin_lock_irqsave(&blkbk->pending_free_lock, flags);
+       was_empty = list_empty(&blkbk->pending_free);
+       list_add(&req->free_list, &blkbk->pending_free);
+       spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
        if (was_empty)
-               wake_up(&pending_free_wq);
-}
-
-static void unplug_queue(blkif_t *blkif)
-{
-       if (blkif->plug == NULL)
-               return;
-       if (blkif->plug->unplug_fn)
-               blkif->plug->unplug_fn(blkif->plug);
-       blk_put_queue(blkif->plug);
-       blkif->plug = NULL;
+               wake_up(&blkbk->pending_free_wq);
 }
 
-static void plug_queue(blkif_t *blkif, struct block_device *bdev)
+/*
+ * Notification from the guest OS.
+ */
+static void blkif_notify_work(struct blkif_st *blkif)
 {
-       struct request_queue *q = bdev_get_queue(bdev);
-
-       if (q == blkif->plug)
-               return;
-       unplug_queue(blkif);
-       blk_get_queue(q);
-       blkif->plug = q;
+       blkif->waiting_reqs = 1;
+       wake_up(&blkif->wq);
 }
 
-static void fast_flush_area(pending_req_t *req)
+irqreturn_t blkif_be_int(int irq, void *dev_id)
 {
-       struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-       unsigned int i, invcount = 0;
-       grant_handle_t handle;
-       int ret;
-
-       for (i = 0; i < req->nr_pages; i++) {
-               handle = pending_handle(req, i);
-               if (handle == BLKBACK_INVALID_HANDLE)
-                       continue;
-               gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
-                                   GNTMAP_host_map, handle);
-               pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
-               invcount++;
-       }
-
-       ret = HYPERVISOR_grant_table_op(
-               GNTTABOP_unmap_grant_ref, unmap, invcount);
-       BUG_ON(ret);
+       blkif_notify_work(dev_id);
+       return IRQ_HANDLED;
 }
 
-/******************************************************************
+/*
  * SCHEDULER FUNCTIONS
  */
 
-static void print_stats(blkif_t *blkif)
+static void print_stats(struct blkif_st *blkif)
 {
        printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d  |  br %4d\n",
               current->comm, blkif->st_oo_req,
@@ -205,7 +198,8 @@ static void print_stats(blkif_t *blkif)
 
 int blkif_schedule(void *arg)
 {
-       blkif_t *blkif = arg;
+       struct blkif_st *blkif = arg;
+       struct vbd *vbd = &blkif->vbd;
 
        blkif_get(blkif);
 
@@ -215,20 +209,22 @@ int blkif_schedule(void *arg)
        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
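+               /* Pick up any resize of the backing device. */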
+               if (unlikely(vbd->size != vbd_size(vbd)))
+                       vbd_resize(blkif);
 
                wait_event_interruptible(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop());
                wait_event_interruptible(
-                       pending_free_wq,
-                       !list_empty(&pending_free) || kthread_should_stop());
+                       blkbk->pending_free_wq,
+                       !list_empty(&blkbk->pending_free) ||
+                       kthread_should_stop());
 
                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */
 
                if (do_block_io_op(blkif))
                        blkif->waiting_reqs = 1;
-               unplug_queue(blkif);
 
                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
@@ -245,11 +241,48 @@ int blkif_schedule(void *arg)
        return 0;
 }
 
-/******************************************************************
- * COMPLETION CALLBACK -- Called as bh->b_end_io()
+/*
+ * Unmap the grant references, and also remove the M2P overrides
+ * used in the 'pending_req'.
+ */
+static void fast_flush_area(struct pending_req *req)
+{
+       struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+       unsigned int i, invcount = 0;
+       grant_handle_t handle;
+       int ret;
+
+       for (i = 0; i < req->nr_pages; i++) {
+               handle = pending_handle(req, i);
+               if (handle == BLKBACK_INVALID_HANDLE)
+                       continue;
+               gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
+                                   GNTMAP_host_map, handle);
+               pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
+               invcount++;
+       }
+
+       ret = HYPERVISOR_grant_table_op(
+               GNTTABOP_unmap_grant_ref, unmap, invcount);
+       BUG_ON(ret);
+       /* Note, we use invcount, not req->nr_pages, so we can't index
+        * using vaddr(req, i).
+        */
+       for (i = 0; i < invcount; i++) {
+               ret = m2p_remove_override(
+                       virt_to_page(unmap[i].host_addr), false);
+               if (ret) {
+                       printk(KERN_ALERT "Failed to remove M2P override for " \
+                               "%lx\n", (unsigned long)unmap[i].host_addr);
+                       continue;
+               }
+       }
+}
+
+/*
+ * Completion callback on the bios; called from end_block_io_op() below.
  */
 
-static void __end_block_io_op(pending_req_t *pending_req, int error)
+static void __end_block_io_op(struct pending_req *pending_req, int error)
 {
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
@@ -263,6 +296,10 @@ static void __end_block_io_op(pending_req_t *pending_req, int error)
                pending_req->status = BLKIF_RSP_ERROR;
        }
 
+       /* If all of the bios have completed, it is time to unmap
+        * the grant references associated with 'request' and provide
+        * the proper response on the ring.
+        */
        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                fast_flush_area(pending_req);
                make_response(pending_req->blkif, pending_req->id,
@@ -272,6 +309,9 @@ static void __end_block_io_op(pending_req_t *pending_req, int error)
        }
 }
 
+/*
+ * bio callback.
+ */
 static void end_block_io_op(struct bio *bio, int error)
 {
        __end_block_io_op(bio->bi_private, error);
@@ -279,33 +319,17 @@ static void end_block_io_op(struct bio *bio, int error)
 }
 
 
-/******************************************************************************
- * NOTIFICATION FROM GUEST OS.
- */
-
-static void blkif_notify_work(blkif_t *blkif)
-{
-       blkif->waiting_reqs = 1;
-       wake_up(&blkif->wq);
-}
-
-irqreturn_t blkif_be_int(int irq, void *dev_id)
-{
-       blkif_notify_work(dev_id);
-       return IRQ_HANDLED;
-}
-
-
 
-/******************************************************************
- * DOWNWARD CALLS -- These interface with the block-device layer proper.
+/*
+ * Function to copy the 'struct blkif_request' from the ring buffer
+ * (which has the sectors we want, number of them, grant references, etc),
+ * and transmute it to the block API to hand it over to the proper block disk.
  */
-
-static int do_block_io_op(blkif_t *blkif)
+static int do_block_io_op(struct blkif_st *blkif)
 {
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        struct blkif_request req;
-       pending_req_t *pending_req;
+       struct pending_req *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;
 
@@ -379,11 +403,14 @@ static int do_block_io_op(blkif_t *blkif)
        return more_to_do;
 }
 
-static void dispatch_rw_block_io(blkif_t *blkif,
+/*
+ * Transmute the 'struct blkif_request' to a proper 'struct bio'
+ * and call 'submit_bio' to pass it to the underlying storage.
+ */
+static void dispatch_rw_block_io(struct blkif_st *blkif,
                                 struct blkif_request *req,
-                                pending_req_t *pending_req)
+                                struct pending_req *pending_req)
 {
-       extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct phys_req preq;
        struct {
@@ -391,8 +418,11 @@ static void dispatch_rw_block_io(blkif_t *blkif,
        } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
        struct bio *bio = NULL;
-       int ret, i;
+       struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+       int ret, i, nbio = 0;
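+       /* bios are gathered in biolist[] and submitted together below. */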
        int operation;
+       struct blk_plug plug;
+       struct request_queue *q;
 
        switch (req->operation) {
        case BLKIF_OP_READ:
@@ -409,7 +439,7 @@ static void dispatch_rw_block_io(blkif_t *blkif,
                BUG();
        }
 
-       /* Check that number of segments is sane. */
+       /* Check that the number of segments is sane. */
        nseg = req->nr_segments;
        if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
            unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
@@ -418,7 +448,7 @@ static void dispatch_rw_block_io(blkif_t *blkif,
        }
 
        preq.dev           = req->handle;
-       preq.sector_number = req->sector_number;
+       preq.sector_number = req->u.rw.sector_number;
        preq.nr_sects      = 0;
 
        pending_req->blkif     = blkif;
@@ -427,14 +457,17 @@ static void dispatch_rw_block_io(blkif_t *blkif,
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;
 
+       /* Fill out preq.nr_sects with the proper number of sectors, and set up
+        * map[..] with the PFN of the page in our domain and the corresponding
+        * grant reference for each page.
+        */
        for (i = 0; i < nseg; i++) {
                uint32_t flags;
 
-               seg[i].nsec = req->seg[i].last_sect -
-                       req->seg[i].first_sect + 1;
-
-               if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
-                   (req->seg[i].last_sect < req->seg[i].first_sect))
+               seg[i].nsec = req->u.rw.seg[i].last_sect -
+                       req->u.rw.seg[i].first_sect + 1;
+               if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
+                   (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;
 
@@ -442,12 +475,16 @@ static void dispatch_rw_block_io(blkif_t *blkif,
                if (operation != READ)
                        flags |= GNTMAP_readonly;
                gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
-                                 req->seg[i].gref, blkif->domid);
+                                 req->u.rw.seg[i].gref, blkif->domid);
        }
 
        ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
        BUG_ON(ret);
 
+       /* Now swizzle the MFN in our domain with the MFN from the other domain
+        * so that when we access vaddr(pending_req, i) it has the contents of
+        * the page from the other domain.
+        */
        for (i = 0; i < nseg; i++) {
                if (unlikely(map[i].status != 0)) {
                        DPRINTK("invalid buffer -- could not remap it\n");
@@ -460,13 +497,25 @@ static void dispatch_rw_block_io(blkif_t *blkif,
                if (ret)
                        continue;
 
-               set_phys_to_machine(__pa(vaddr(
-                       pending_req, i)) >> PAGE_SHIFT,
-                       FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
+               ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
+                       blkbk->pending_page(pending_req, i), false);
+               if (ret) {
+                       printk(KERN_ALERT "Failed to install M2P override for"\
+                               " %lx (ret: %d)\n", (unsigned long)
+                               map[i].dev_bus_addr, ret);
+                       /* We could switch over to GNTTABOP_copy */
+                       continue;
+               }
+
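+               /* seg[i].buf is the bus address of the granted page plus the
+                * byte offset of the first sector of this segment.
+                */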
                seg[i].buf  = map[i].dev_bus_addr |
-                       (req->seg[i].first_sect << 9);
+                       (req->u.rw.seg[i].first_sect << 9);
        }
 
+       /* If we have failed at this point, we need to undo the M2P override,
+        * set gnttab_set_unmap_op on all of the grant references and perform
+        * the hypercall to unmap the grants - that is all done in
+        * fast_flush_area.
+        */
        if (ret)
                goto fail_flush;
 
@@ -478,13 +527,12 @@ static void dispatch_rw_block_io(blkif_t *blkif,
                goto fail_flush;
        }
 
-       plug_queue(blkif, preq.bdev);
-       atomic_set(&pending_req->pendcnt, 1);
+       /* The corresponding blkif_put is done in __end_block_io_op. */
        blkif_get(blkif);
 
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
-                   ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
+                   ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        DPRINTK("Misaligned I/O request from domain %d",
                                blkif->domid);
                        goto fail_put_bio;
@@ -492,15 +540,11 @@ static void dispatch_rw_block_io(blkif_t *blkif,
 
                while ((bio == NULL) ||
                       (bio_add_page(bio,
-                                    virt_to_page(vaddr(pending_req, i)),
+                                    blkbk->pending_page(pending_req, i),
                                     seg[i].nsec << 9,
                                     seg[i].buf & ~PAGE_MASK) == 0)) {
-                       if (bio) {
-                               atomic_inc(&pending_req->pendcnt);
-                               submit_bio(operation, bio);
-                       }
 
-                       bio = bio_alloc(GFP_KERNEL, nseg-i);
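+                       /* First page, or bio_add_page() would not take it:
+                        * open a new bio sized for the remaining segments.
+                        */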
+                       bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;
 
@@ -513,9 +557,10 @@ static void dispatch_rw_block_io(blkif_t *blkif,
                preq.sector_number += seg[i].nsec;
        }
 
+       /* This will be hit if the operation was a barrier. */
        if (!bio) {
                BUG_ON(operation != WRITE_BARRIER);
-               bio = bio_alloc(GFP_KERNEL, 0);
+               bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;
 
@@ -525,7 +570,22 @@ static void dispatch_rw_block_io(blkif_t *blkif,
                bio->bi_sector  = -1;
        }
 
-       submit_bio(operation, bio);
+
+       /* Set pendcnt to the total number of bios up front, so the submission
+        * loop below does not have to call atomic_inc for each bio.
+        */
+       atomic_set(&pending_req->pendcnt, nbio);
+
+       /* Get a reference count for the disk queue and start sending I/O */
+       q = bdev_get_queue(preq.bdev);
+       blk_get_queue(q);
+       blk_start_plug(&plug);
+
+       for (i = 0; i < nbio; i++)
+               submit_bio(operation, biolist[i]);
+
+       blk_finish_plug(&plug);
+       /* Let the I/Os go.. */
+       blk_put_queue(q);
 
        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
@@ -537,28 +597,26 @@ static void dispatch_rw_block_io(blkif_t *blkif,
  fail_flush:
        fast_flush_area(pending_req);
  fail_response:
+       /* Haven't submitted any bios yet. */
        make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
        free_req(pending_req);
        msleep(1); /* back off a bit */
        return;
 
  fail_put_bio:
+       for (i = 0; i < (nbio-1); i++)
+               bio_put(biolist[i]);
        __end_block_io_op(pending_req, -EINVAL);
-       if (bio)
-               bio_put(bio);
-       unplug_queue(blkif);
        msleep(1); /* back off a bit */
        return;
 }
 
 
 
-/******************************************************************
- * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
+/*
+ * Put a response on the ring reporting how the operation fared.
  */
-
-
-static void make_response(blkif_t *blkif, u64 id,
+static void make_response(struct blkif_st *blkif, u64 id,
                          unsigned short op, int st)
 {
        struct blkif_response  resp;
@@ -614,43 +672,73 @@ static void make_response(blkif_t *blkif, u64 id,
 static int __init blkif_init(void)
 {
        int i, mmap_pages;
+       int rc = 0;
 
-       printk(KERN_CRIT "***blkif_init\n");
        if (!xen_pv_domain())
                return -ENODEV;
 
+       blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
+       if (!blkbk) {
+               printk(KERN_ALERT "%s: out of memory!\n", __func__);
+               return -ENOMEM;
+       }
+
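+       /* One page for each possible segment of every in-flight request. */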
        mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
-       pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
+       blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
                                        blkif_reqs, GFP_KERNEL);
-       pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
+       blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
+                                       mmap_pages, GFP_KERNEL);
+       blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
                                        mmap_pages, GFP_KERNEL);
-       pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);
 
-       if (!pending_reqs || !pending_grant_handles || !pending_pages)
+       if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
+           !blkbk->pending_pages) {
+               rc = -ENOMEM;
                goto out_of_memory;
+       }
 
-       for (i = 0; i < mmap_pages; i++)
-               pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
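+       /* Pre-allocate the pages that grant references get mapped into. */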
+       for (i = 0; i < mmap_pages; i++) {
+               blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
+               blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
+               if (blkbk->pending_pages[i] == NULL) {
+                       rc = -ENOMEM;
+                       goto out_of_memory;
+               }
+       }
+       rc = blkif_interface_init();
+       if (rc)
+               goto failed_init;
 
-       blkif_interface_init();
+       memset(blkbk->pending_reqs, 0,
+              blkif_reqs * sizeof(blkbk->pending_reqs[0]));
 
-       memset(pending_reqs, 0, sizeof(pending_reqs));
-       INIT_LIST_HEAD(&pending_free);
+       INIT_LIST_HEAD(&blkbk->pending_free);
+       spin_lock_init(&blkbk->pending_free_lock);
+       init_waitqueue_head(&blkbk->pending_free_wq);
 
        for (i = 0; i < blkif_reqs; i++)
-               list_add_tail(&pending_reqs[i].free_list, &pending_free);
+               list_add_tail(&blkbk->pending_reqs[i].free_list,
+                             &blkbk->pending_free);
 
-       blkif_xenbus_init();
+       rc = blkif_xenbus_init();
+       if (rc)
+               goto failed_init;
 
        return 0;
 
  out_of_memory:
-       kfree(pending_reqs);
-       kfree(pending_grant_handles);
-       free_empty_pages_and_pagevec(pending_pages, mmap_pages);
-       printk("%s: out of memory\n", __FUNCTION__);
-       return -ENOMEM;
+       printk(KERN_ERR "%s: out of memory\n", __func__);
+ failed_init:
+       kfree(blkbk->pending_reqs);
+       kfree(blkbk->pending_grant_handles);
+       if (blkbk->pending_pages) {
+               for (i = 0; i < mmap_pages; i++) {
+                       if (blkbk->pending_pages[i])
+                               __free_page(blkbk->pending_pages[i]);
+               }
+               kfree(blkbk->pending_pages);
+       }
+       kfree(blkbk);
+       blkbk = NULL;
+       return rc;
 }
 
 module_init(blkif_init);