xen/blkback: Use kzalloc's, and GFP_KERNEL for data structures.
drivers/xen/blkback/blkback.c
/******************************************************************************
 * arch/xen/drivers/blkif/backend/main.c
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  arch/xen/drivers/blkif/frontend
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static int log_stats;
static int debug_lvl;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
typedef struct {
        blkif_t       *blkif;
        u64            id;
        int            nr_pages;
        atomic_t       pendcnt;
        unsigned short operation;
        int            status;
        struct list_head free_list;
} pending_req_t;

#define BLKBACK_INVALID_HANDLE (~0)

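/*
 * Global state shared by all blkback instances: the pool of pending_req
 * structures, the pages used to map guest grants, and the grant handles
 * for those mappings. Free requests are kept on 'pending_free' and
 * 'pending_free_wq' is used to wait when the pool is exhausted.
 */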
struct xen_blkbk {
        pending_req_t           *pending_reqs;
        struct list_head        pending_free;
        spinlock_t              pending_free_lock;
        wait_queue_head_t       pending_free_wq;
        struct page             **pending_pages;
        grant_handle_t          *pending_grant_handles;
};

static struct xen_blkbk *blkbk;

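/*
 * Little helpers to index the per-segment slots that belong to a given
 * pending_req. Each request owns a contiguous run of
 * BLKIF_MAX_SEGMENTS_PER_REQUEST entries in the global page and
 * grant-handle arrays. Note that pending_page() deliberately omits the
 * 'blkbk->' prefix so that it expands against the pointer written at
 * the call site (e.g. blkbk->pending_page(req, seg)).
 */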
static inline int vaddr_pagenr(pending_req_t *req, int seg)
{
        return (req - blkbk->pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(pending_req_t *req, int seg)
{
        unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
        return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
        (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])


static int do_block_io_op(blkif_t *blkif);
static void dispatch_rw_block_io(blkif_t *blkif,
                                 struct blkif_request *req,
                                 pending_req_t *pending_req);
static void make_response(blkif_t *blkif, u64 id,
                          unsigned short op, int st);

/******************************************************************
 * misc small helpers
 */
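/*
 * Retrieve a free pending_req from the pool, or NULL if none are
 * available. Callers treat a NULL return as "out of descriptors" and
 * retry the work later.
 */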
static pending_req_t *alloc_req(void)
{
        pending_req_t *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&blkbk->pending_free_lock, flags);
        if (!list_empty(&blkbk->pending_free)) {
                req = list_entry(blkbk->pending_free.next, pending_req_t, free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
        return req;
}

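/*
 * Return a pending_req to the pool. If the pool was empty, wake up
 * anybody waiting in blkif_schedule() for a free slot.
 */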
static void free_req(pending_req_t *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&blkbk->pending_free_lock, flags);
        was_empty = list_empty(&blkbk->pending_free);
        list_add(&req->free_list, &blkbk->pending_free);
        spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
        if (was_empty)
                wake_up(&blkbk->pending_free_wq);
}

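/* Drop our reference to the plugged queue, unplugging it first if needed. */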
static void unplug_queue(blkif_t *blkif)
{
        if (blkif->plug == NULL)
                return;
        if (blkif->plug->unplug_fn)
                blkif->plug->unplug_fn(blkif->plug);
        blk_put_queue(blkif->plug);
        blkif->plug = NULL;
}

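/*
 * Remember which request queue we are feeding so that it can be
 * unplugged once a batch of requests has been submitted. Switching to
 * a different block device unplugs the previously held queue.
 */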
static void plug_queue(blkif_t *blkif, struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q == blkif->plug)
                return;
        unplug_queue(blkif);
        blk_get_queue(q);
        blkif->plug = q;
}

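/*
 * Unmap the grant mappings of a completed request and drop the
 * corresponding M2P overrides, so the pages can be reused for the next
 * request.
 */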
static void fast_flush_area(pending_req_t *req)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
        grant_handle_t handle;
        int ret;

        for (i = 0; i < req->nr_pages; i++) {
                handle = pending_handle(req, i);
                if (handle == BLKBACK_INVALID_HANDLE)
                        continue;
                gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
                                    GNTMAP_host_map, handle);
                pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        ret = HYPERVISOR_grant_table_op(
                GNTTABOP_unmap_grant_ref, unmap, invcount);
        BUG_ON(ret);
        /* Note: we use invcount, not req->nr_pages, so we can't index
         * using vaddr(req, i). */
        for (i = 0; i < invcount; i++) {
                ret = m2p_remove_override(
                        virt_to_page(unmap[i].host_addr), false);
                if (ret) {
                        printk(KERN_ALERT "Failed to remove M2P override for "
                                "%lx\n", (unsigned long)unmap[i].host_addr);
                        continue;
                }
        }
}

/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static void print_stats(blkif_t *blkif)
{
        printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d  |  br %4d\n",
               current->comm, blkif->st_oo_req,
               blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
}

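/*
 * Per-device kernel thread: wait for the frontend to post requests (and
 * for a free pending_req), pull them off the ring via do_block_io_op(),
 * and periodically print statistics if requested.
 */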
int blkif_schedule(void *arg)
{
        blkif_t *blkif = arg;
        struct vbd *vbd = &blkif->vbd;

        blkif_get(blkif);

        if (debug_lvl)
                printk(KERN_DEBUG "%s: started\n", current->comm);

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_size(vbd)))
                        vbd_resize(blkif);

                wait_event_interruptible(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop());
                wait_event_interruptible(
                        blkbk->pending_free_wq,
                        !list_empty(&blkbk->pending_free) || kthread_should_stop());

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                if (do_block_io_op(blkif))
                        blkif->waiting_reqs = 1;
                unplug_queue(blkif);

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        if (log_stats)
                print_stats(blkif);
        if (debug_lvl)
                printk(KERN_DEBUG "%s: exiting\n", current->comm);

        blkif->xenblkd = NULL;
        blkif_put(blkif);

        return 0;
}

/******************************************************************
 * COMPLETION CALLBACK -- Called as bio->bi_end_io()
 */

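/*
 * Common completion path. A failed write barrier is reported as
 * EOPNOTSUPP; any other error fails the whole request. When the last
 * outstanding bio for the request finishes, tear down the grant
 * mappings and send the response back to the frontend.
 */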
static void __end_block_io_op(pending_req_t *pending_req, int error)
{
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
            (error == -EOPNOTSUPP)) {
                DPRINTK("blkback: write barrier op failed, not supported\n");
                blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                DPRINTK("Buffer not up-to-date at end of operation, "
                        "error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                fast_flush_area(pending_req);
                make_response(pending_req->blkif, pending_req->id,
                              pending_req->operation, pending_req->status);
                blkif_put(pending_req->blkif);
                free_req(pending_req);
        }
}

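/* bi_end_io callback: forward the result and release the bio. */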
static void end_block_io_op(struct bio *bio, int error)
{
        __end_block_io_op(bio->bi_private, error);
        bio_put(bio);
}


/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */

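/*
 * The frontend kicked us via the event channel: note that work is
 * pending and wake the per-device thread. blkif_be_int() is the
 * interrupt handler bound to that event channel.
 */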
static void blkif_notify_work(blkif_t *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

irqreturn_t blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}



/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */

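/*
 * Consume requests from the shared ring, copying each one into a
 * private structure before validating it, and hand them to
 * dispatch_rw_block_io(). Returns non-zero if more work is pending.
 */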
static int do_block_io_op(blkif_t *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        struct blkif_request req;
        pending_req_t *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req();
                if (NULL == pending_req) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                        blkif->st_rd_req++;
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
                case BLKIF_OP_WRITE_BARRIER:
                        blkif->st_br_req++;
                        /* fall through */
                case BLKIF_OP_WRITE:
                        blkif->st_wr_req++;
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
                default:
                        /* A good sign something is wrong: sleep for a while to
                         * avoid excessive CPU consumption by a bad guest. */
                        msleep(1);
                        DPRINTK("error: unknown block io operation [%d]\n",
                                req.operation);
                        make_response(blkif, req.id, req.operation,
                                      BLKIF_RSP_ERROR);
                        free_req(pending_req);
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }

        return more_to_do;
}

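/*
 * Transmute a request from the frontend into actual block I/O: map the
 * granted segments into our address space, sanity-check the extent
 * against the VBD, then assemble bios and submit them to the underlying
 * block device.
 */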
static void dispatch_rw_block_io(blkif_t *blkif,
                                 struct blkif_request *req,
                                 pending_req_t *pending_req)
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct phys_req preq;
        struct {
                unsigned long buf; unsigned int nsec;
        } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
        struct bio *bio = NULL;
        int ret, i;
        int operation;

        switch (req->operation) {
        case BLKIF_OP_READ:
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
                operation = WRITE;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                operation = REQ_FLUSH | REQ_FUA;
                break;
        default:
                operation = 0; /* make gcc happy */
                BUG();
        }

        /* Check that number of segments is sane. */
        nseg = req->nr_segments;
        if (unlikely(nseg == 0 && operation != (REQ_FLUSH | REQ_FUA)) ||
            unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
                DPRINTK("Bad number of segments in request (%d)\n", nseg);
                goto fail_response;
        }

        preq.dev           = req->handle;
        preq.sector_number = req->u.rw.sector_number;
        preq.nr_sects      = 0;

        pending_req->blkif     = blkif;
        pending_req->id        = req->id;
        pending_req->operation = req->operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;

        for (i = 0; i < nseg; i++) {
                uint32_t flags;

                seg[i].nsec = req->u.rw.seg[i].last_sect -
                        req->u.rw.seg[i].first_sect + 1;

                if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;

                flags = GNTMAP_host_map;
                if (operation != READ)
                        flags |= GNTMAP_readonly;
                gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                  req->u.rw.seg[i].gref, blkif->domid);
        }

        ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
        BUG_ON(ret);

        for (i = 0; i < nseg; i++) {
                if (unlikely(map[i].status != 0)) {
                        DPRINTK("invalid buffer -- could not remap it\n");
                        map[i].handle = BLKBACK_INVALID_HANDLE;
                        ret |= 1;
                }

                pending_handle(pending_req, i) = map[i].handle;

                if (ret)
                        continue;

                ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
                        blkbk->pending_page(pending_req, i), false);
                if (ret) {
                        printk(KERN_ALERT "Failed to install M2P override for"
                                " %lx (ret: %d)\n", (unsigned long)map[i].dev_bus_addr, ret);
                        continue;
                }

                seg[i].buf  = map[i].dev_bus_addr |
                        (req->u.rw.seg[i].first_sect << 9);
        }

        if (ret)
                goto fail_flush;

        if (vbd_translate(&preq, blkif, operation) != 0) {
                DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
                        operation == READ ? "read" : "write",
                        preq.sector_number,
                        preq.sector_number + preq.nr_sects, preq.dev);
                goto fail_flush;
        }

        plug_queue(blkif, preq.bdev);
        atomic_set(&pending_req->pendcnt, 1);
        blkif_get(blkif);

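        /*
         * Walk the segments, packing as many as possible into each bio.
         * A new bio is started whenever bio_add_page() refuses to take
         * another page; 'pendcnt' tracks the bios in flight.
         */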
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        DPRINTK("Misaligned I/O request from domain %d",
                                blkif->domid);
                        goto fail_put_bio;
                }

                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     blkbk->pending_page(pending_req, i),
                                     seg[i].nsec << 9,
                                     seg[i].buf & ~PAGE_MASK) == 0)) {
                        if (bio) {
                                atomic_inc(&pending_req->pendcnt);
                                submit_bio(operation, bio);
                        }

                        bio = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_sector  = preq.sector_number;
                }

                preq.sector_number += seg[i].nsec;
        }

        if (!bio) {
                BUG_ON(operation != (REQ_FLUSH | REQ_FUA));
                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;

                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
                bio->bi_sector  = -1;
        }

        submit_bio(operation, bio);

        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
        else if (operation == WRITE || operation == (REQ_FLUSH | REQ_FUA))
                blkif->st_wr_sect += preq.nr_sects;

        return;

 fail_flush:
        fast_flush_area(pending_req);
 fail_response:
        make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
        free_req(pending_req);
        msleep(1); /* back off a bit */
        return;

 fail_put_bio:
        __end_block_io_op(pending_req, -EINVAL);
        if (bio)
                bio_put(bio);
        unplug_queue(blkif);
        msleep(1); /* back off a bit */
        return;
}



/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */


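/*
 * Put a response on the ring for the given request id and notify the
 * frontend if the ring macros say a notification is required. Also
 * re-kick ourselves if more requests were queued in the meantime.
 */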
static void make_response(blkif_t *blkif, u64 id,
                          unsigned short op, int st)
{
        struct blkif_response  resp;
        unsigned long     flags;
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int more_to_do = 0;
        int notify;

        resp.id        = id;
        resp.operation = op;
        resp.status    = st;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
        /* Place on the response ring for the relevant domain. */
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_32:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_64:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        default:
                BUG();
        }
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
                /*
                 * Tail check for pending requests. Allows frontend to avoid
                 * notifications if requests are already in flight (lower
                 * overheads and promotes batching).
                 */
                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);

        } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
                more_to_do = 1;
        }

        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

        if (more_to_do)
                blkif_notify_work(blkif);
        if (notify)
                notify_remote_via_irq(blkif->irq);
}

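/*
 * Module initialisation: allocate the shared pool of pending_reqs, the
 * grant handles and the pages used for mapping, then register the
 * interface and xenbus handlers.
 */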
static int __init blkif_init(void)
{
        int i, mmap_pages;
        int rc = 0;

        if (!xen_pv_domain())
                return -ENODEV;

        blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
        if (!blkbk) {
                printk(KERN_ALERT "%s: out of memory!\n", __func__);
                return -ENOMEM;
        }

        mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

        blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
                                        blkif_reqs, GFP_KERNEL);
        blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
                                        mmap_pages, GFP_KERNEL);
        blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
                                        mmap_pages, GFP_KERNEL);

        if (!blkbk->pending_reqs || !blkbk->pending_grant_handles || !blkbk->pending_pages) {
                rc = -ENOMEM;
                goto out_of_memory;
        }

        for (i = 0; i < mmap_pages; i++) {
                blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
                blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
                if (blkbk->pending_pages[i] == NULL) {
                        rc = -ENOMEM;
                        goto out_of_memory;
                }
        }
        rc = blkif_interface_init();
        if (rc)
                goto failed_init;

        memset(blkbk->pending_reqs, 0, blkif_reqs * sizeof(blkbk->pending_reqs[0]));

        INIT_LIST_HEAD(&blkbk->pending_free);
        spin_lock_init(&blkbk->pending_free_lock);
        init_waitqueue_head(&blkbk->pending_free_wq);

        for (i = 0; i < blkif_reqs; i++)
                list_add_tail(&blkbk->pending_reqs[i].free_list, &blkbk->pending_free);

        rc = blkif_xenbus_init();
        if (rc)
                goto failed_init;

        return 0;

 out_of_memory:
        printk(KERN_ERR "%s: out of memory\n", __func__);
 failed_init:
        kfree(blkbk->pending_reqs);
        kfree(blkbk->pending_grant_handles);
        if (blkbk->pending_pages) {
                for (i = 0; i < mmap_pages; i++) {
                        if (blkbk->pending_pages[i])
                                __free_page(blkbk->pending_pages[i]);
                }
                kfree(blkbk->pending_pages);
        }
        kfree(blkbk);
        blkbk = NULL;
        return rc;
}

module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");