xen/blkback: Fix the WRITE_BARRIER
drivers/xen/blkback/blkback.c
/******************************************************************************
 * arch/xen/drivers/blkif/backend/main.c
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  arch/xen/drivers/blkif/frontend
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

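/*
 * The old WRITE_BARRIER request is emulated as an ordinary write that also
 * asks for a preceding cache flush (REQ_FLUSH) and forced unit access
 * (REQ_FUA), so the data is on stable media before the request completes.
 */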
#define WRITE_BARRIER   (REQ_WRITE | REQ_FLUSH | REQ_FUA)

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static int log_stats = 0;
static int debug_lvl = 0;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
typedef struct {
        blkif_t       *blkif;
        u64            id;
        int            nr_pages;
        atomic_t       pendcnt;
        unsigned short operation;
        int            status;
        struct list_head free_list;
} pending_req_t;

#define BLKBACK_INVALID_HANDLE (~0)

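/*
 * Global state shared by every blkback instance: a fixed pool of pending_req
 * structures on a free list (protected by pending_free_lock, with waiters
 * sleeping on pending_free_wq), plus flat arrays of pre-allocated pages and
 * grant handles, one slot per possible segment of every pending request.
 */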
struct xen_blkbk {
        pending_req_t   *pending_reqs;
        struct list_head        pending_free;
        spinlock_t              pending_free_lock;
        wait_queue_head_t       pending_free_wq;
        struct page             **pending_pages;
        grant_handle_t          *pending_grant_handles;
};

static struct xen_blkbk *blkbk;

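/*
 * Each pending_req owns BLKIF_MAX_SEGMENTS_PER_REQUEST consecutive slots in
 * pending_pages[] and pending_grant_handles[]; vaddr_pagenr() computes the
 * flat index for segment 'seg' of request 'req'.
 */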
static inline int vaddr_pagenr(pending_req_t *req, int seg)
{
        return (req - blkbk->pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(pending_req_t *req, int seg)
{
        unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
        return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
        (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])


static int do_block_io_op(blkif_t *blkif);
static void dispatch_rw_block_io(blkif_t *blkif,
                                 struct blkif_request *req,
                                 pending_req_t *pending_req);
static void make_response(blkif_t *blkif, u64 id,
                          unsigned short op, int st);

/******************************************************************
 * misc small helpers
 */
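/*
 * Take a free pending_req off the pool, or return NULL if the pool is
 * exhausted; in that case do_block_io_op() backs off and blkif_schedule()
 * sleeps on pending_free_wq until free_req() returns an entry.
 */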
static pending_req_t *alloc_req(void)
{
        pending_req_t *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&blkbk->pending_free_lock, flags);
        if (!list_empty(&blkbk->pending_free)) {
                req = list_entry(blkbk->pending_free.next, pending_req_t, free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
        return req;
}

static void free_req(pending_req_t *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&blkbk->pending_free_lock, flags);
        was_empty = list_empty(&blkbk->pending_free);
        list_add(&req->free_list, &blkbk->pending_free);
        spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
        if (was_empty)
                wake_up(&blkbk->pending_free_wq);
}

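/*
 * At most one backing request queue is kept 'plugged' per blkif.
 * plug_queue() swaps in the queue of the device being targeted, taking a
 * reference; unplug_queue() runs the queue's unplug_fn and drops the
 * reference.
 */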
static void unplug_queue(blkif_t *blkif)
{
        if (blkif->plug == NULL)
                return;
        if (blkif->plug->unplug_fn)
                blkif->plug->unplug_fn(blkif->plug);
        blk_put_queue(blkif->plug);
        blkif->plug = NULL;
}

static void plug_queue(blkif_t *blkif, struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q == blkif->plug)
                return;
        unplug_queue(blkif);
        blk_get_queue(q);
        blkif->plug = q;
}

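/*
 * Tear down the grant mappings of a finished (or failed) request: unmap all
 * valid handles in one batched hypercall and then remove the matching M2P
 * overrides.
 */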
static void fast_flush_area(pending_req_t *req)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
        grant_handle_t handle;
        int ret;

        for (i = 0; i < req->nr_pages; i++) {
                handle = pending_handle(req, i);
                if (handle == BLKBACK_INVALID_HANDLE)
                        continue;
                gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
                                    GNTMAP_host_map, handle);
                pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        ret = HYPERVISOR_grant_table_op(
                GNTTABOP_unmap_grant_ref, unmap, invcount);
        BUG_ON(ret);
        /*
         * Note: we index by invcount here, not nr_pages, so we can't look
         * up addresses with vaddr(req, i).
         */
        for (i = 0; i < invcount; i++) {
                ret = m2p_remove_override(
                        virt_to_page(unmap[i].host_addr), false);
                if (ret) {
                        printk(KERN_ALERT "Failed to remove M2P override for "
                               "%lx\n", (unsigned long)unmap[i].host_addr);
                        continue;
                }
        }
}

/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static void print_stats(blkif_t *blkif)
{
        printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d  |  br %4d\n",
               current->comm, blkif->st_oo_req,
               blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
}

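/*
 * Main loop of the per-device kernel thread: wait until the frontend kicks
 * us and a free pending_req is available, drain the ring through
 * do_block_io_op(), unplug the backing queue, and optionally print stats
 * every ten seconds.
 */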
int blkif_schedule(void *arg)
{
        blkif_t *blkif = arg;
        struct vbd *vbd = &blkif->vbd;

        blkif_get(blkif);

        if (debug_lvl)
                printk(KERN_DEBUG "%s: started\n", current->comm);

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_size(vbd)))
                        vbd_resize(blkif);

                wait_event_interruptible(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop());
                wait_event_interruptible(
                        blkbk->pending_free_wq,
                        !list_empty(&blkbk->pending_free) || kthread_should_stop());

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                if (do_block_io_op(blkif))
                        blkif->waiting_reqs = 1;
                unplug_queue(blkif);

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        if (log_stats)
                print_stats(blkif);
        if (debug_lvl)
                printk(KERN_DEBUG "%s: exiting\n", current->comm);

        blkif->xenblkd = NULL;
        blkif_put(blkif);

        return 0;
}

/******************************************************************
 * COMPLETION CALLBACK -- Called as bio->bi_end_io()
 */

static void __end_block_io_op(pending_req_t *pending_req, int error)
{
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
            (error == -EOPNOTSUPP)) {
                DPRINTK("blkback: write barrier op failed, not supported\n");
                blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                DPRINTK("Buffer not up-to-date at end of operation, "
                        "error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                fast_flush_area(pending_req);
                make_response(pending_req->blkif, pending_req->id,
                              pending_req->operation, pending_req->status);
                blkif_put(pending_req->blkif);
                free_req(pending_req);
        }
}

static void end_block_io_op(struct bio *bio, int error)
{
        __end_block_io_op(bio->bi_private, error);
        bio_put(bio);
}


/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */

static void blkif_notify_work(blkif_t *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

irqreturn_t blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}


/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */

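/*
 * Consume requests from the shared ring up to the producer index sampled on
 * entry. Returns non-zero if we stopped early (kthread stopping or no free
 * pending_req) so the caller knows there is still work queued.
 */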
static int do_block_io_op(blkif_t *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        struct blkif_request req;
        pending_req_t *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req();
                if (NULL == pending_req) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                        blkif->st_rd_req++;
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
                case BLKIF_OP_WRITE_BARRIER:
                        blkif->st_br_req++;
                        /* fall through */
                case BLKIF_OP_WRITE:
                        blkif->st_wr_req++;
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
                default:
                        /* A good sign something is wrong: sleep for a while to
                         * avoid excessive CPU consumption by a bad guest. */
                        msleep(1);
                        DPRINTK("error: unknown block io operation [%d]\n",
                                req.operation);
                        make_response(blkif, req.id, req.operation,
                                      BLKIF_RSP_ERROR);
                        free_req(pending_req);
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }

        return more_to_do;
}

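/*
 * Handle one block-I/O request from the ring: validate its segments,
 * grant-map the guest's data pages, translate the sector range through the
 * VBD, then build and submit bios covering the segments.
 */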
static void dispatch_rw_block_io(blkif_t *blkif,
                                 struct blkif_request *req,
                                 pending_req_t *pending_req)
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct phys_req preq;
        struct {
                unsigned long buf; unsigned int nsec;
        } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
        struct bio *bio = NULL;
        int ret, i;
        int operation;

        switch (req->operation) {
        case BLKIF_OP_READ:
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
                operation = WRITE;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                operation = WRITE_BARRIER;
                break;
        default:
                operation = 0; /* make gcc happy */
                BUG();
        }

        /* Check that number of segments is sane. */
        nseg = req->nr_segments;
        if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
            unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
                DPRINTK("Bad number of segments in request (%d)\n", nseg);
                goto fail_response;
        }

        preq.dev           = req->handle;
        preq.sector_number = req->u.rw.sector_number;
        preq.nr_sects      = 0;

        pending_req->blkif     = blkif;
        pending_req->id        = req->id;
        pending_req->operation = req->operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;

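        /*
         * Set up the grant maps. For anything other than a READ the backend
         * only reads the guest's data, so the mapping can be read-only;
         * READs need a writable mapping because the device writes into the
         * guest's pages.
         */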
        for (i = 0; i < nseg; i++) {
                uint32_t flags;

                seg[i].nsec = req->u.rw.seg[i].last_sect -
                        req->u.rw.seg[i].first_sect + 1;

                if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;

                flags = GNTMAP_host_map;
                if (operation != READ)
                        flags |= GNTMAP_readonly;
                gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                  req->u.rw.seg[i].gref, blkif->domid);
        }

        ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
        BUG_ON(ret);

        for (i = 0; i < nseg; i++) {
                if (unlikely(map[i].status != 0)) {
                        DPRINTK("invalid buffer -- could not remap it\n");
                        map[i].handle = BLKBACK_INVALID_HANDLE;
                        ret |= 1;
                }

                pending_handle(pending_req, i) = map[i].handle;

                if (ret)
                        continue;

                ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
                        blkbk->pending_page(pending_req, i), false);
                if (ret) {
                        printk(KERN_ALERT "Failed to install M2P override for"
                               " %lx (ret: %d)\n", (unsigned long)map[i].dev_bus_addr, ret);
                        continue;
                }

                seg[i].buf  = map[i].dev_bus_addr |
                        (req->u.rw.seg[i].first_sect << 9);
        }

        if (ret)
                goto fail_flush;

        if (vbd_translate(&preq, blkif, operation) != 0) {
                DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
                        operation == READ ? "read" : "write",
                        preq.sector_number,
                        preq.sector_number + preq.nr_sects, preq.dev);
                goto fail_flush;
        }

        plug_queue(blkif, preq.bdev);
        atomic_set(&pending_req->pendcnt, 1);
        blkif_get(blkif);

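        /*
         * Pack the segments into as few bios as possible: keep adding pages
         * to the current bio until bio_add_page() refuses, then submit it
         * (bumping pendcnt) and start a new bio sized for the remaining
         * segments.
         */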
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        DPRINTK("Misaligned I/O request from domain %d",
                                blkif->domid);
                        goto fail_put_bio;
                }

                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     blkbk->pending_page(pending_req, i),
                                     seg[i].nsec << 9,
                                     seg[i].buf & ~PAGE_MASK) == 0)) {
                        if (bio) {
                                atomic_inc(&pending_req->pendcnt);
                                submit_bio(operation, bio);
                        }

                        bio = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_sector  = preq.sector_number;
                }

                preq.sector_number += seg[i].nsec;
        }

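        /*
         * A WRITE_BARRIER request may carry no data segments at all; submit
         * an empty bio so the flush/FUA still reaches the device.
         */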
        if (!bio) {
                BUG_ON(operation != WRITE_BARRIER);
                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;

                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
                bio->bi_sector  = -1;
        }

        submit_bio(operation, bio);

        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
        else if (operation == WRITE || operation == WRITE_BARRIER)
                blkif->st_wr_sect += preq.nr_sects;

        return;

 fail_flush:
        fast_flush_area(pending_req);
 fail_response:
        make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
        free_req(pending_req);
        msleep(1); /* back off a bit */
        return;

 fail_put_bio:
        __end_block_io_op(pending_req, -EINVAL);
        if (bio)
                bio_put(bio);
        unplug_queue(blkif);
        msleep(1); /* back off a bit */
        return;
}



/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */


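/*
 * Queue a response on the shared ring, push it, and decide whether to
 * notify the frontend over its event channel and/or re-kick our own thread
 * because more requests are already waiting.
 */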
static void make_response(blkif_t *blkif, u64 id,
                          unsigned short op, int st)
{
        struct blkif_response  resp;
        unsigned long     flags;
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int more_to_do = 0;
        int notify;

        resp.id        = id;
        resp.operation = op;
        resp.status    = st;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
        /* Place on the response ring for the relevant domain. */
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_32:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_64:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        default:
                BUG();
        }
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
                /*
                 * Tail check for pending requests. Allows frontend to avoid
                 * notifications if requests are already in flight (lower
                 * overheads and promotes batching).
                 */
                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);

        } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
                more_to_do = 1;
        }

        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

        if (more_to_do)
                blkif_notify_work(blkif);
        if (notify)
                notify_remote_via_irq(blkif->irq);
}

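/*
 * Module initialisation: allocate the global pending-request pool and its
 * backing pages and grant-handle arrays, then register the interface and
 * xenbus pieces.
 */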
static int __init blkif_init(void)
{
        int i, mmap_pages;
        int rc = 0;

        if (!xen_pv_domain())
                return -ENODEV;

        blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
        if (!blkbk) {
                printk(KERN_ALERT "%s: out of memory!\n", __func__);
                return -ENOMEM;
        }

        mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

        blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
                                        blkif_reqs, GFP_KERNEL);
        blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
                                        mmap_pages, GFP_KERNEL);
        blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
                                        mmap_pages, GFP_KERNEL);

        if (!blkbk->pending_reqs || !blkbk->pending_grant_handles || !blkbk->pending_pages) {
                rc = -ENOMEM;
                goto out_of_memory;
        }

        for (i = 0; i < mmap_pages; i++) {
                blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
                blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
                if (blkbk->pending_pages[i] == NULL) {
                        rc = -ENOMEM;
                        goto out_of_memory;
                }
        }
        rc = blkif_interface_init();
        if (rc)
                goto failed_init;

        memset(blkbk->pending_reqs, 0,
               blkif_reqs * sizeof(blkbk->pending_reqs[0]));

        INIT_LIST_HEAD(&blkbk->pending_free);
        spin_lock_init(&blkbk->pending_free_lock);
        init_waitqueue_head(&blkbk->pending_free_wq);

        for (i = 0; i < blkif_reqs; i++)
                list_add_tail(&blkbk->pending_reqs[i].free_list, &blkbk->pending_free);

        rc = blkif_xenbus_init();
        if (rc)
                goto failed_init;

        return 0;

 out_of_memory:
        printk(KERN_ERR "%s: out of memory\n", __func__);
 failed_init:
        kfree(blkbk->pending_reqs);
        kfree(blkbk->pending_grant_handles);
        if (blkbk->pending_pages) {
                for (i = 0; i < mmap_pages; i++) {
                        if (blkbk->pending_pages[i])
                                __free_page(blkbk->pending_pages[i]);
                }
        }
        kfree(blkbk->pending_pages);
        kfree(blkbk);
        blkbk = NULL;
        return rc;
}

module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");