KVM: VMX: conditionally disable 2M pages
virt/kvm/kvm_main.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  *
9  * Authors:
10  *   Avi Kivity   <avi@qumranet.com>
11  *   Yaniv Kamay  <yaniv@qumranet.com>
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  *
16  */
17
18 #include "iodev.h"
19
20 #include <linux/kvm_host.h>
21 #include <linux/kvm.h>
22 #include <linux/module.h>
23 #include <linux/errno.h>
24 #include <linux/percpu.h>
25 #include <linux/gfp.h>
26 #include <linux/mm.h>
27 #include <linux/miscdevice.h>
28 #include <linux/vmalloc.h>
29 #include <linux/reboot.h>
30 #include <linux/debugfs.h>
31 #include <linux/highmem.h>
32 #include <linux/file.h>
33 #include <linux/sysdev.h>
34 #include <linux/cpu.h>
35 #include <linux/sched.h>
36 #include <linux/cpumask.h>
37 #include <linux/smp.h>
38 #include <linux/anon_inodes.h>
39 #include <linux/profile.h>
40 #include <linux/kvm_para.h>
41 #include <linux/pagemap.h>
42 #include <linux/mman.h>
43 #include <linux/swap.h>
44 #include <linux/bitops.h>
45 #include <linux/spinlock.h>
46
47 #include <asm/processor.h>
48 #include <asm/io.h>
49 #include <asm/uaccess.h>
50 #include <asm/pgtable.h>
51
52 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
53 #include "coalesced_mmio.h"
54 #endif
55
56 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
57 #include <linux/pci.h>
58 #include <linux/interrupt.h>
59 #include "irq.h"
60 #endif
61
62 MODULE_AUTHOR("Qumranet");
63 MODULE_LICENSE("GPL");
64
65 /*
66  * Ordering of locks:
67  *
68  *              kvm->lock --> kvm->irq_lock
69  */
70
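/*
 * Illustrative sketch, not taken from this file: any path that needs both
 * locks must follow the ordering documented above, taking kvm->lock before
 * kvm->irq_lock and releasing in the reverse order:
 *
 *	mutex_lock(&kvm->lock);
 *	mutex_lock(&kvm->irq_lock);
 *	... touch state protected by both locks ...
 *	mutex_unlock(&kvm->irq_lock);
 *	mutex_unlock(&kvm->lock);
 */
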
71 DEFINE_SPINLOCK(kvm_lock);
72 LIST_HEAD(vm_list);
73
74 static cpumask_var_t cpus_hardware_enabled;
75
76 struct kmem_cache *kvm_vcpu_cache;
77 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
78
79 static __read_mostly struct preempt_ops kvm_preempt_ops;
80
81 struct dentry *kvm_debugfs_dir;
82
83 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
84                            unsigned long arg);
85
86 static bool kvm_rebooting;
87
88 static bool largepages_enabled = true;
89
90 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
91 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
92                                                       int assigned_dev_id)
93 {
94         struct list_head *ptr;
95         struct kvm_assigned_dev_kernel *match;
96
97         list_for_each(ptr, head) {
98                 match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
99                 if (match->assigned_dev_id == assigned_dev_id)
100                         return match;
101         }
102         return NULL;
103 }
104
105 static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
106                                     *assigned_dev, int irq)
107 {
108         int i, index;
109         struct msix_entry *host_msix_entries;
110
111         host_msix_entries = assigned_dev->host_msix_entries;
112
113         index = -1;
114         for (i = 0; i < assigned_dev->entries_nr; i++)
115                 if (irq == host_msix_entries[i].vector) {
116                         index = i;
117                         break;
118                 }
119         if (index < 0) {
120                 printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");
121                 return 0;
122         }
123
124         return index;
125 }
126
127 static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
128 {
129         struct kvm_assigned_dev_kernel *assigned_dev;
130         struct kvm *kvm;
131         int i;
132
133         assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
134                                     interrupt_work);
135         kvm = assigned_dev->kvm;
136
137         mutex_lock(&kvm->irq_lock);
138         spin_lock_irq(&assigned_dev->assigned_dev_lock);
139         if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
140                 struct kvm_guest_msix_entry *guest_entries =
141                         assigned_dev->guest_msix_entries;
142                 for (i = 0; i < assigned_dev->entries_nr; i++) {
143                         if (!(guest_entries[i].flags &
144                                         KVM_ASSIGNED_MSIX_PENDING))
145                                 continue;
146                         guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
147                         kvm_set_irq(assigned_dev->kvm,
148                                     assigned_dev->irq_source_id,
149                                     guest_entries[i].vector, 1);
150                 }
151         } else
152                 kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
153                             assigned_dev->guest_irq, 1);
154
155         spin_unlock_irq(&assigned_dev->assigned_dev_lock);
156         mutex_unlock(&assigned_dev->kvm->irq_lock);
157 }
158
159 static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
160 {
161         unsigned long flags;
162         struct kvm_assigned_dev_kernel *assigned_dev =
163                 (struct kvm_assigned_dev_kernel *) dev_id;
164
165         spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
166         if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
167                 int index = find_index_from_host_irq(assigned_dev, irq);
168                 if (index < 0)
169                         goto out;
170                 assigned_dev->guest_msix_entries[index].flags |=
171                         KVM_ASSIGNED_MSIX_PENDING;
172         }
173
174         schedule_work(&assigned_dev->interrupt_work);
175
176         if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
177                 disable_irq_nosync(irq);
178                 assigned_dev->host_irq_disabled = true;
179         }
180
181 out:
182         spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
183         return IRQ_HANDLED;
184 }
185
186 /* Ack the irq line for an assigned device */
187 static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
188 {
189         struct kvm_assigned_dev_kernel *dev;
190         unsigned long flags;
191
192         if (kian->gsi == -1)
193                 return;
194
195         dev = container_of(kian, struct kvm_assigned_dev_kernel,
196                            ack_notifier);
197
198         kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
199
200         /* The guest irq may be shared so this ack may be
201          * from another device.
202          */
203         spin_lock_irqsave(&dev->assigned_dev_lock, flags);
204         if (dev->host_irq_disabled) {
205                 enable_irq(dev->host_irq);
206                 dev->host_irq_disabled = false;
207         }
208         spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
209 }
210
211 static void deassign_guest_irq(struct kvm *kvm,
212                                struct kvm_assigned_dev_kernel *assigned_dev)
213 {
214         kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
215         assigned_dev->ack_notifier.gsi = -1;
216
217         if (assigned_dev->irq_source_id != -1)
218                 kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
219         assigned_dev->irq_source_id = -1;
220         assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
221 }
222
223 /* The function implicitly holds the kvm->lock mutex due to cancel_work_sync(). */
224 static void deassign_host_irq(struct kvm *kvm,
225                               struct kvm_assigned_dev_kernel *assigned_dev)
226 {
227         /*
228          * In kvm_free_device_irq, cancel_work_sync() returns true if:
229          * 1. the work was scheduled and has been cancelled, or
230          * 2. the work callback has already run.
231          *
232          * The first case ensures that the irq is disabled and no more events
233          * will happen. In the second case the irq may have been re-enabled
234          * (e.g. for MSI), so we disable it here to prevent further events.
235          *
236          * Note that this may result in a nested disable if the interrupt type
237          * is INTx, but that is fine since we are going to free it.
238          *
239          * If this function is called as part of VM destruction, make sure the
240          * kvm state is still valid at this point, since we may also have to
241          * wait for interrupt_work to complete.
242          */
243         if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
244                 int i;
245                 for (i = 0; i < assigned_dev->entries_nr; i++)
246                         disable_irq_nosync(assigned_dev->
247                                            host_msix_entries[i].vector);
248
249                 cancel_work_sync(&assigned_dev->interrupt_work);
250
251                 for (i = 0; i < assigned_dev->entries_nr; i++)
252                         free_irq(assigned_dev->host_msix_entries[i].vector,
253                                  (void *)assigned_dev);
254
255                 assigned_dev->entries_nr = 0;
256                 kfree(assigned_dev->host_msix_entries);
257                 kfree(assigned_dev->guest_msix_entries);
258                 pci_disable_msix(assigned_dev->dev);
259         } else {
260                 /* Deal with MSI and INTx */
261                 disable_irq_nosync(assigned_dev->host_irq);
262                 cancel_work_sync(&assigned_dev->interrupt_work);
263
264                 free_irq(assigned_dev->host_irq, (void *)assigned_dev);
265
266                 if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
267                         pci_disable_msi(assigned_dev->dev);
268         }
269
270         assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
271 }
272
273 static int kvm_deassign_irq(struct kvm *kvm,
274                             struct kvm_assigned_dev_kernel *assigned_dev,
275                             unsigned long irq_requested_type)
276 {
277         unsigned long guest_irq_type, host_irq_type;
278
279         if (!irqchip_in_kernel(kvm))
280                 return -EINVAL;
281         /* no irq assignment to deassign */
282         if (!assigned_dev->irq_requested_type)
283                 return -ENXIO;
284
285         host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
286         guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;
287
288         if (host_irq_type)
289                 deassign_host_irq(kvm, assigned_dev);
290         if (guest_irq_type)
291                 deassign_guest_irq(kvm, assigned_dev);
292
293         return 0;
294 }
295
296 static void kvm_free_assigned_irq(struct kvm *kvm,
297                                   struct kvm_assigned_dev_kernel *assigned_dev)
298 {
299         kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
300 }
301
302 static void kvm_free_assigned_device(struct kvm *kvm,
303                                      struct kvm_assigned_dev_kernel
304                                      *assigned_dev)
305 {
306         kvm_free_assigned_irq(kvm, assigned_dev);
307
308         pci_reset_function(assigned_dev->dev);
309
310         pci_release_regions(assigned_dev->dev);
311         pci_disable_device(assigned_dev->dev);
312         pci_dev_put(assigned_dev->dev);
313
314         list_del(&assigned_dev->list);
315         kfree(assigned_dev);
316 }
317
318 void kvm_free_all_assigned_devices(struct kvm *kvm)
319 {
320         struct list_head *ptr, *ptr2;
321         struct kvm_assigned_dev_kernel *assigned_dev;
322
323         list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
324                 assigned_dev = list_entry(ptr,
325                                           struct kvm_assigned_dev_kernel,
326                                           list);
327
328                 kvm_free_assigned_device(kvm, assigned_dev);
329         }
330 }
331
332 static int assigned_device_enable_host_intx(struct kvm *kvm,
333                                             struct kvm_assigned_dev_kernel *dev)
334 {
335         dev->host_irq = dev->dev->irq;
336         /* Even though this is PCI, we don't want to use shared
337          * interrupts. Sharing host devices with guest-assigned devices
338          * on the same interrupt line is not a happy situation: there
339          * are going to be long delays in accepting, acking, etc.
340          */
341         if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
342                         0, "kvm_assigned_intx_device", (void *)dev))
343                 return -EIO;
344         return 0;
345 }
346
347 #ifdef __KVM_HAVE_MSI
348 static int assigned_device_enable_host_msi(struct kvm *kvm,
349                                            struct kvm_assigned_dev_kernel *dev)
350 {
351         int r;
352
353         if (!dev->dev->msi_enabled) {
354                 r = pci_enable_msi(dev->dev);
355                 if (r)
356                         return r;
357         }
358
359         dev->host_irq = dev->dev->irq;
360         if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
361                         "kvm_assigned_msi_device", (void *)dev)) {
362                 pci_disable_msi(dev->dev);
363                 return -EIO;
364         }
365
366         return 0;
367 }
368 #endif
369
370 #ifdef __KVM_HAVE_MSIX
371 static int assigned_device_enable_host_msix(struct kvm *kvm,
372                                             struct kvm_assigned_dev_kernel *dev)
373 {
374         int i, r = -EINVAL;
375
376         /* host_msix_entries and guest_msix_entries should have been
377          * initialized */
378         if (dev->entries_nr == 0)
379                 return r;
380
381         r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
382         if (r)
383                 return r;
384
385         for (i = 0; i < dev->entries_nr; i++) {
386                 r = request_irq(dev->host_msix_entries[i].vector,
387                                 kvm_assigned_dev_intr, 0,
388                                 "kvm_assigned_msix_device",
389                                 (void *)dev);
390                 /* FIXME: free requested_irq's on failure */
391                 if (r)
392                         return r;
393         }
394
395         return 0;
396 }
397
398 #endif
399
400 static int assigned_device_enable_guest_intx(struct kvm *kvm,
401                                 struct kvm_assigned_dev_kernel *dev,
402                                 struct kvm_assigned_irq *irq)
403 {
404         dev->guest_irq = irq->guest_irq;
405         dev->ack_notifier.gsi = irq->guest_irq;
406         return 0;
407 }
408
409 #ifdef __KVM_HAVE_MSI
410 static int assigned_device_enable_guest_msi(struct kvm *kvm,
411                         struct kvm_assigned_dev_kernel *dev,
412                         struct kvm_assigned_irq *irq)
413 {
414         dev->guest_irq = irq->guest_irq;
415         dev->ack_notifier.gsi = -1;
416         dev->host_irq_disabled = false;
417         return 0;
418 }
419 #endif
420 #ifdef __KVM_HAVE_MSIX
421 static int assigned_device_enable_guest_msix(struct kvm *kvm,
422                         struct kvm_assigned_dev_kernel *dev,
423                         struct kvm_assigned_irq *irq)
424 {
425         dev->guest_irq = irq->guest_irq;
426         dev->ack_notifier.gsi = -1;
427         dev->host_irq_disabled = false;
428         return 0;
429 }
430 #endif
431
432 static int assign_host_irq(struct kvm *kvm,
433                            struct kvm_assigned_dev_kernel *dev,
434                            __u32 host_irq_type)
435 {
436         int r = -EEXIST;
437
438         if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
439                 return r;
440
441         switch (host_irq_type) {
442         case KVM_DEV_IRQ_HOST_INTX:
443                 r = assigned_device_enable_host_intx(kvm, dev);
444                 break;
445 #ifdef __KVM_HAVE_MSI
446         case KVM_DEV_IRQ_HOST_MSI:
447                 r = assigned_device_enable_host_msi(kvm, dev);
448                 break;
449 #endif
450 #ifdef __KVM_HAVE_MSIX
451         case KVM_DEV_IRQ_HOST_MSIX:
452                 r = assigned_device_enable_host_msix(kvm, dev);
453                 break;
454 #endif
455         default:
456                 r = -EINVAL;
457         }
458
459         if (!r)
460                 dev->irq_requested_type |= host_irq_type;
461
462         return r;
463 }
464
465 static int assign_guest_irq(struct kvm *kvm,
466                             struct kvm_assigned_dev_kernel *dev,
467                             struct kvm_assigned_irq *irq,
468                             unsigned long guest_irq_type)
469 {
470         int id;
471         int r = -EEXIST;
472
473         if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
474                 return r;
475
476         id = kvm_request_irq_source_id(kvm);
477         if (id < 0)
478                 return id;
479
480         dev->irq_source_id = id;
481
482         switch (guest_irq_type) {
483         case KVM_DEV_IRQ_GUEST_INTX:
484                 r = assigned_device_enable_guest_intx(kvm, dev, irq);
485                 break;
486 #ifdef __KVM_HAVE_MSI
487         case KVM_DEV_IRQ_GUEST_MSI:
488                 r = assigned_device_enable_guest_msi(kvm, dev, irq);
489                 break;
490 #endif
491 #ifdef __KVM_HAVE_MSIX
492         case KVM_DEV_IRQ_GUEST_MSIX:
493                 r = assigned_device_enable_guest_msix(kvm, dev, irq);
494                 break;
495 #endif
496         default:
497                 r = -EINVAL;
498         }
499
500         if (!r) {
501                 dev->irq_requested_type |= guest_irq_type;
502                 kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
503         } else
504                 kvm_free_irq_source_id(kvm, dev->irq_source_id);
505
506         return r;
507 }
508
509 /* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
510 static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
511                                    struct kvm_assigned_irq *assigned_irq)
512 {
513         int r = -EINVAL;
514         struct kvm_assigned_dev_kernel *match;
515         unsigned long host_irq_type, guest_irq_type;
516
517         if (!capable(CAP_SYS_RAWIO))
518                 return -EPERM;
519
520         if (!irqchip_in_kernel(kvm))
521                 return r;
522
523         mutex_lock(&kvm->lock);
524         r = -ENODEV;
525         match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
526                                       assigned_irq->assigned_dev_id);
527         if (!match)
528                 goto out;
529
530         host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
531         guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);
532
533         r = -EINVAL;
534         /* can only assign one type at a time */
535         if (hweight_long(host_irq_type) > 1)
536                 goto out;
537         if (hweight_long(guest_irq_type) > 1)
538                 goto out;
539         if (host_irq_type == 0 && guest_irq_type == 0)
540                 goto out;
541
542         r = 0;
543         if (host_irq_type)
544                 r = assign_host_irq(kvm, match, host_irq_type);
545         if (r)
546                 goto out;
547
548         if (guest_irq_type)
549                 r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
550 out:
551         mutex_unlock(&kvm->lock);
552         return r;
553 }
554
555 static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
556                                          struct kvm_assigned_irq
557                                          *assigned_irq)
558 {
559         int r = -ENODEV;
560         struct kvm_assigned_dev_kernel *match;
561
562         mutex_lock(&kvm->lock);
563
564         match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
565                                       assigned_irq->assigned_dev_id);
566         if (!match)
567                 goto out;
568
569         r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
570 out:
571         mutex_unlock(&kvm->lock);
572         return r;
573 }
574
575 static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
576                                       struct kvm_assigned_pci_dev *assigned_dev)
577 {
578         int r = 0;
579         struct kvm_assigned_dev_kernel *match;
580         struct pci_dev *dev;
581
582         down_read(&kvm->slots_lock);
583         mutex_lock(&kvm->lock);
584
585         match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
586                                       assigned_dev->assigned_dev_id);
587         if (match) {
588                 /* device already assigned */
589                 r = -EEXIST;
590                 goto out;
591         }
592
593         match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
594         if (match == NULL) {
595                 printk(KERN_INFO "%s: Couldn't allocate memory\n",
596                        __func__);
597                 r = -ENOMEM;
598                 goto out;
599         }
600         dev = pci_get_bus_and_slot(assigned_dev->busnr,
601                                    assigned_dev->devfn);
602         if (!dev) {
603                 printk(KERN_INFO "%s: host device not found\n", __func__);
604                 r = -EINVAL;
605                 goto out_free;
606         }
607         if (pci_enable_device(dev)) {
608                 printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
609                 r = -EBUSY;
610                 goto out_put;
611         }
612         r = pci_request_regions(dev, "kvm_assigned_device");
613         if (r) {
614                 printk(KERN_INFO "%s: Could not get access to device regions\n",
615                        __func__);
616                 goto out_disable;
617         }
618
619         pci_reset_function(dev);
620
621         match->assigned_dev_id = assigned_dev->assigned_dev_id;
622         match->host_busnr = assigned_dev->busnr;
623         match->host_devfn = assigned_dev->devfn;
624         match->flags = assigned_dev->flags;
625         match->dev = dev;
626         spin_lock_init(&match->assigned_dev_lock);
627         match->irq_source_id = -1;
628         match->kvm = kvm;
629         match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
630         INIT_WORK(&match->interrupt_work,
631                   kvm_assigned_dev_interrupt_work_handler);
632
633         list_add(&match->list, &kvm->arch.assigned_dev_head);
634
635         if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
636                 if (!kvm->arch.iommu_domain) {
637                         r = kvm_iommu_map_guest(kvm);
638                         if (r)
639                                 goto out_list_del;
640                 }
641                 r = kvm_assign_device(kvm, match);
642                 if (r)
643                         goto out_list_del;
644         }
645
646 out:
647         mutex_unlock(&kvm->lock);
648         up_read(&kvm->slots_lock);
649         return r;
650 out_list_del:
651         list_del(&match->list);
652         pci_release_regions(dev);
653 out_disable:
654         pci_disable_device(dev);
655 out_put:
656         pci_dev_put(dev);
657 out_free:
658         kfree(match);
659         mutex_unlock(&kvm->lock);
660         up_read(&kvm->slots_lock);
661         return r;
662 }
663 #endif
664
665 #ifdef KVM_CAP_DEVICE_DEASSIGNMENT
666 static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
667                 struct kvm_assigned_pci_dev *assigned_dev)
668 {
669         int r = 0;
670         struct kvm_assigned_dev_kernel *match;
671
672         mutex_lock(&kvm->lock);
673
674         match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
675                                       assigned_dev->assigned_dev_id);
676         if (!match) {
677                 printk(KERN_INFO "%s: device hasn't been assigned before, "
678                   "so cannot be deassigned\n", __func__);
679                 r = -EINVAL;
680                 goto out;
681         }
682
683         if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
684                 kvm_deassign_device(kvm, match);
685
686         kvm_free_assigned_device(kvm, match);
687
688 out:
689         mutex_unlock(&kvm->lock);
690         return r;
691 }
692 #endif
693
694 inline int kvm_is_mmio_pfn(pfn_t pfn)
695 {
696         if (pfn_valid(pfn)) {
697                 struct page *page = compound_head(pfn_to_page(pfn));
698                 return PageReserved(page);
699         }
700
701         return true;
702 }
703
704 /*
705  * Switches to specified vcpu, until a matching vcpu_put()
706  */
707 void vcpu_load(struct kvm_vcpu *vcpu)
708 {
709         int cpu;
710
711         mutex_lock(&vcpu->mutex);
712         cpu = get_cpu();
713         preempt_notifier_register(&vcpu->preempt_notifier);
714         kvm_arch_vcpu_load(vcpu, cpu);
715         put_cpu();
716 }
717
718 void vcpu_put(struct kvm_vcpu *vcpu)
719 {
720         preempt_disable();
721         kvm_arch_vcpu_put(vcpu);
722         preempt_notifier_unregister(&vcpu->preempt_notifier);
723         preempt_enable();
724         mutex_unlock(&vcpu->mutex);
725 }
726
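/*
 * Illustrative usage, a sketch rather than code from this file: callers
 * bracket per-vcpu work with the vcpu_load()/vcpu_put() pair, e.g.:
 *
 *	vcpu_load(vcpu);
 *	... run or inspect the vcpu while it is loaded on this cpu ...
 *	vcpu_put(vcpu);
 *
 * kvm_vcpu_block() below drops and re-takes the vcpu the same way around
 * schedule().
 */
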
727 static void ack_flush(void *_completed)
728 {
729 }
730
731 static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
732 {
733         int i, cpu, me;
734         cpumask_var_t cpus;
735         bool called = true;
736         struct kvm_vcpu *vcpu;
737
738         if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
739                 cpumask_clear(cpus);
740
741         me = get_cpu();
742         spin_lock(&kvm->requests_lock);
743         kvm_for_each_vcpu(i, vcpu, kvm) {
744                 if (test_and_set_bit(req, &vcpu->requests))
745                         continue;
746                 cpu = vcpu->cpu;
747                 if (cpus != NULL && cpu != -1 && cpu != me)
748                         cpumask_set_cpu(cpu, cpus);
749         }
750         if (unlikely(cpus == NULL))
751                 smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
752         else if (!cpumask_empty(cpus))
753                 smp_call_function_many(cpus, ack_flush, NULL, 1);
754         else
755                 called = false;
756         spin_unlock(&kvm->requests_lock);
757         put_cpu();
758         free_cpumask_var(cpus);
759         return called;
760 }
761
762 void kvm_flush_remote_tlbs(struct kvm *kvm)
763 {
764         if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
765                 ++kvm->stat.remote_tlb_flush;
766 }
767
768 void kvm_reload_remote_mmus(struct kvm *kvm)
769 {
770         make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
771 }
772
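/*
 * Sketch of the consumer side (an assumption about the arch code, which is
 * not part of this file): the vcpu run loop picks requests up with
 * test_and_clear_bit() before entering the guest, e.g.:
 *
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		... flush the TLB for this vcpu ...
 *
 * make_all_cpus_request() above only sets the bit and kicks the cpu; the
 * request itself is serviced in the arch-specific vcpu loop.
 */
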
773 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
774 {
775         struct page *page;
776         int r;
777
778         mutex_init(&vcpu->mutex);
779         vcpu->cpu = -1;
780         vcpu->kvm = kvm;
781         vcpu->vcpu_id = id;
782         init_waitqueue_head(&vcpu->wq);
783
784         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
785         if (!page) {
786                 r = -ENOMEM;
787                 goto fail;
788         }
789         vcpu->run = page_address(page);
790
791         r = kvm_arch_vcpu_init(vcpu);
792         if (r < 0)
793                 goto fail_free_run;
794         return 0;
795
796 fail_free_run:
797         free_page((unsigned long)vcpu->run);
798 fail:
799         return r;
800 }
801 EXPORT_SYMBOL_GPL(kvm_vcpu_init);
802
803 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
804 {
805         kvm_arch_vcpu_uninit(vcpu);
806         free_page((unsigned long)vcpu->run);
807 }
808 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
809
810 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
811 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
812 {
813         return container_of(mn, struct kvm, mmu_notifier);
814 }
815
816 static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
817                                              struct mm_struct *mm,
818                                              unsigned long address)
819 {
820         struct kvm *kvm = mmu_notifier_to_kvm(mn);
821         int need_tlb_flush;
822
823         /*
824          * When ->invalidate_page runs, the linux pte has been zapped
825          * already but the page is still allocated until
826          * ->invalidate_page returns. So if we increase the sequence
827          * here the kvm page fault will notice if the spte can't be
828          * established because the page is going to be freed. If
829          * instead the kvm page fault establishes the spte before
830          * ->invalidate_page runs, kvm_unmap_hva will release it
831          * before returning.
832          *
833          * The sequence increase only needs to be seen at spin_unlock
834          * time, and not at spin_lock time.
835          *
836          * Increasing the sequence after the spin_unlock would be
837          * unsafe because the kvm page fault could then establish the
838          * pte after kvm_unmap_hva returned, without noticing the page
839          * is going to be freed.
840          */
841         spin_lock(&kvm->mmu_lock);
842         kvm->mmu_notifier_seq++;
843         need_tlb_flush = kvm_unmap_hva(kvm, address);
844         spin_unlock(&kvm->mmu_lock);
845
846         /* we have to flush the TLB before the pages can be freed */
847         if (need_tlb_flush)
848                 kvm_flush_remote_tlbs(kvm);
849
850 }
851
852 static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
853                                                     struct mm_struct *mm,
854                                                     unsigned long start,
855                                                     unsigned long end)
856 {
857         struct kvm *kvm = mmu_notifier_to_kvm(mn);
858         int need_tlb_flush = 0;
859
860         spin_lock(&kvm->mmu_lock);
861         /*
862          * The count increase must become visible at unlock time as no
863          * spte can be established without taking the mmu_lock, and the
864          * count is also read inside the mmu_lock critical section.
865          */
866         kvm->mmu_notifier_count++;
867         for (; start < end; start += PAGE_SIZE)
868                 need_tlb_flush |= kvm_unmap_hva(kvm, start);
869         spin_unlock(&kvm->mmu_lock);
870
871         /* we have to flush the TLB before the pages can be freed */
872         if (need_tlb_flush)
873                 kvm_flush_remote_tlbs(kvm);
874 }
875
876 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
877                                                   struct mm_struct *mm,
878                                                   unsigned long start,
879                                                   unsigned long end)
880 {
881         struct kvm *kvm = mmu_notifier_to_kvm(mn);
882
883         spin_lock(&kvm->mmu_lock);
884         /*
885          * This sequence increase will notify the kvm page fault that
886          * the page that is going to be mapped in the spte could have
887          * been freed.
888          */
889         kvm->mmu_notifier_seq++;
890         /*
891          * The above sequence increase must be visible before the
892          * below count decrease, but both values are read by the kvm
893          * page fault under the mmu_lock spinlock, so we don't need to
894          * add an smp_wmb() here between the two.
895          */
896         kvm->mmu_notifier_count--;
897         spin_unlock(&kvm->mmu_lock);
898
899         BUG_ON(kvm->mmu_notifier_count < 0);
900 }
901
902 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
903                                               struct mm_struct *mm,
904                                               unsigned long address)
905 {
906         struct kvm *kvm = mmu_notifier_to_kvm(mn);
907         int young;
908
909         spin_lock(&kvm->mmu_lock);
910         young = kvm_age_hva(kvm, address);
911         spin_unlock(&kvm->mmu_lock);
912
913         if (young)
914                 kvm_flush_remote_tlbs(kvm);
915
916         return young;
917 }
918
919 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
920                                      struct mm_struct *mm)
921 {
922         struct kvm *kvm = mmu_notifier_to_kvm(mn);
923         kvm_arch_flush_shadow(kvm);
924 }
925
926 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
927         .invalidate_page        = kvm_mmu_notifier_invalidate_page,
928         .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
929         .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
930         .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
931         .release                = kvm_mmu_notifier_release,
932 };
933 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
934
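/*
 * Sketch of the reader side of mmu_notifier_seq/mmu_notifier_count (an
 * assumption modelled on mmu_notifier_retry() in kvm_host.h, not code from
 * this file): the page fault path samples the sequence before resolving the
 * hva and then, under mmu_lock, retries if an invalidation raced with it:
 *
 *	if (unlikely(kvm->mmu_notifier_count))
 *		... retry the fault ...
 *	smp_rmb();
 *	if (kvm->mmu_notifier_seq != mmu_seq)
 *		... retry the fault ...
 */
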
935 static struct kvm *kvm_create_vm(void)
936 {
937         struct kvm *kvm = kvm_arch_create_vm();
938 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
939         struct page *page;
940 #endif
941
942         if (IS_ERR(kvm))
943                 goto out;
944 #ifdef CONFIG_HAVE_KVM_IRQCHIP
945         INIT_LIST_HEAD(&kvm->irq_routing);
946         INIT_HLIST_HEAD(&kvm->mask_notifier_list);
947 #endif
948
949 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
950         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
951         if (!page) {
952                 kfree(kvm);
953                 return ERR_PTR(-ENOMEM);
954         }
955         kvm->coalesced_mmio_ring =
956                         (struct kvm_coalesced_mmio_ring *)page_address(page);
957 #endif
958
959 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
960         {
961                 int err;
962                 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
963                 err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
964                 if (err) {
965 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
966                         put_page(page);
967 #endif
968                         kfree(kvm);
969                         return ERR_PTR(err);
970                 }
971         }
972 #endif
973
974         kvm->mm = current->mm;
975         atomic_inc(&kvm->mm->mm_count);
976         spin_lock_init(&kvm->mmu_lock);
977         spin_lock_init(&kvm->requests_lock);
978         kvm_io_bus_init(&kvm->pio_bus);
979         kvm_irqfd_init(kvm);
980         mutex_init(&kvm->lock);
981         mutex_init(&kvm->irq_lock);
982         kvm_io_bus_init(&kvm->mmio_bus);
983         init_rwsem(&kvm->slots_lock);
984         atomic_set(&kvm->users_count, 1);
985         spin_lock(&kvm_lock);
986         list_add(&kvm->vm_list, &vm_list);
987         spin_unlock(&kvm_lock);
988 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
989         kvm_coalesced_mmio_init(kvm);
990 #endif
991 out:
992         return kvm;
993 }
994
995 /*
996  * Free any memory in @free but not in @dont.
997  */
998 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
999                                   struct kvm_memory_slot *dont)
1000 {
1001         if (!dont || free->rmap != dont->rmap)
1002                 vfree(free->rmap);
1003
1004         if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
1005                 vfree(free->dirty_bitmap);
1006
1007         if (!dont || free->lpage_info != dont->lpage_info)
1008                 vfree(free->lpage_info);
1009
1010         free->npages = 0;
1011         free->dirty_bitmap = NULL;
1012         free->rmap = NULL;
1013         free->lpage_info = NULL;
1014 }
1015
1016 void kvm_free_physmem(struct kvm *kvm)
1017 {
1018         int i;
1019
1020         for (i = 0; i < kvm->nmemslots; ++i)
1021                 kvm_free_physmem_slot(&kvm->memslots[i], NULL);
1022 }
1023
1024 static void kvm_destroy_vm(struct kvm *kvm)
1025 {
1026         struct mm_struct *mm = kvm->mm;
1027
1028         kvm_arch_sync_events(kvm);
1029         spin_lock(&kvm_lock);
1030         list_del(&kvm->vm_list);
1031         spin_unlock(&kvm_lock);
1032         kvm_free_irq_routing(kvm);
1033         kvm_io_bus_destroy(&kvm->pio_bus);
1034         kvm_io_bus_destroy(&kvm->mmio_bus);
1035 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1036         if (kvm->coalesced_mmio_ring != NULL)
1037                 free_page((unsigned long)kvm->coalesced_mmio_ring);
1038 #endif
1039 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1040         mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1041 #else
1042         kvm_arch_flush_shadow(kvm);
1043 #endif
1044         kvm_arch_destroy_vm(kvm);
1045         mmdrop(mm);
1046 }
1047
1048 void kvm_get_kvm(struct kvm *kvm)
1049 {
1050         atomic_inc(&kvm->users_count);
1051 }
1052 EXPORT_SYMBOL_GPL(kvm_get_kvm);
1053
1054 void kvm_put_kvm(struct kvm *kvm)
1055 {
1056         if (atomic_dec_and_test(&kvm->users_count))
1057                 kvm_destroy_vm(kvm);
1058 }
1059 EXPORT_SYMBOL_GPL(kvm_put_kvm);
1060
1061
1062 static int kvm_vm_release(struct inode *inode, struct file *filp)
1063 {
1064         struct kvm *kvm = filp->private_data;
1065
1066         kvm_irqfd_release(kvm);
1067
1068         kvm_put_kvm(kvm);
1069         return 0;
1070 }
1071
1072 /*
1073  * Allocate some memory and give it an address in the guest physical address
1074  * space.
1075  *
1076  * Discontiguous memory is allowed, mostly for framebuffers.
1077  *
1078  * Must be called holding mmap_sem for write.
1079  */
1080 int __kvm_set_memory_region(struct kvm *kvm,
1081                             struct kvm_userspace_memory_region *mem,
1082                             int user_alloc)
1083 {
1084         int r;
1085         gfn_t base_gfn;
1086         unsigned long npages, ugfn;
1087         unsigned long largepages, i;
1088         struct kvm_memory_slot *memslot;
1089         struct kvm_memory_slot old, new;
1090
1091         r = -EINVAL;
1092         /* General sanity checks */
1093         if (mem->memory_size & (PAGE_SIZE - 1))
1094                 goto out;
1095         if (mem->guest_phys_addr & (PAGE_SIZE - 1))
1096                 goto out;
1097         if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
1098                 goto out;
1099         if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
1100                 goto out;
1101         if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
1102                 goto out;
1103
1104         memslot = &kvm->memslots[mem->slot];
1105         base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
1106         npages = mem->memory_size >> PAGE_SHIFT;
1107
1108         if (!npages)
1109                 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
1110
1111         new = old = *memslot;
1112
1113         new.base_gfn = base_gfn;
1114         new.npages = npages;
1115         new.flags = mem->flags;
1116
1117         /* Disallow changing a memory slot's size. */
1118         r = -EINVAL;
1119         if (npages && old.npages && npages != old.npages)
1120                 goto out_free;
1121
1122         /* Check for overlaps */
1123         r = -EEXIST;
1124         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1125                 struct kvm_memory_slot *s = &kvm->memslots[i];
1126
1127                 if (s == memslot || !s->npages)
1128                         continue;
1129                 if (!((base_gfn + npages <= s->base_gfn) ||
1130                       (base_gfn >= s->base_gfn + s->npages)))
1131                         goto out_free;
1132         }
1133
1134         /* Free page dirty bitmap if unneeded */
1135         if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
1136                 new.dirty_bitmap = NULL;
1137
1138         r = -ENOMEM;
1139
1140         /* Allocate if a slot is being created */
1141 #ifndef CONFIG_S390
1142         if (npages && !new.rmap) {
1143                 new.rmap = vmalloc(npages * sizeof(struct page *));
1144
1145                 if (!new.rmap)
1146                         goto out_free;
1147
1148                 memset(new.rmap, 0, npages * sizeof(*new.rmap));
1149
1150                 new.user_alloc = user_alloc;
1151                 /*
1152                  * hva_to_rmmap() serializes with the mmu_lock and to be
1153                  * safe it has to ignore memslots with !user_alloc &&
1154                  * !userspace_addr.
1155                  */
1156                 if (user_alloc)
1157                         new.userspace_addr = mem->userspace_addr;
1158                 else
1159                         new.userspace_addr = 0;
1160         }
1161         if (npages && !new.lpage_info) {
1162                 largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
1163                 largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
1164
1165                 new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
1166
1167                 if (!new.lpage_info)
1168                         goto out_free;
1169
1170                 memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
1171
1172                 if (base_gfn % KVM_PAGES_PER_HPAGE)
1173                         new.lpage_info[0].write_count = 1;
1174                 if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
1175                         new.lpage_info[largepages-1].write_count = 1;
1176                 ugfn = new.userspace_addr >> PAGE_SHIFT;
1177                 /*
1178                  * If the gfn and userspace address are not aligned wrt each
1179                  * other, or if explicitly asked to, disable large page
1180                  * support for this slot
1181                  */
1182                 if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1) ||
1183                     !largepages_enabled)
1184                         for (i = 0; i < largepages; ++i)
1185                                 new.lpage_info[i].write_count = 1;
1186         }
1187
1188         /* Allocate page dirty bitmap if needed */
1189         if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
1190                 unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
1191
1192                 new.dirty_bitmap = vmalloc(dirty_bytes);
1193                 if (!new.dirty_bitmap)
1194                         goto out_free;
1195                 memset(new.dirty_bitmap, 0, dirty_bytes);
1196                 if (old.npages)
1197                         kvm_arch_flush_shadow(kvm);
1198         }
1199 #endif /* not defined CONFIG_S390 */
1200
1201         if (!npages)
1202                 kvm_arch_flush_shadow(kvm);
1203
1204         spin_lock(&kvm->mmu_lock);
1205         if (mem->slot >= kvm->nmemslots)
1206                 kvm->nmemslots = mem->slot + 1;
1207
1208         *memslot = new;
1209         spin_unlock(&kvm->mmu_lock);
1210
1211         r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
1212         if (r) {
1213                 spin_lock(&kvm->mmu_lock);
1214                 *memslot = old;
1215                 spin_unlock(&kvm->mmu_lock);
1216                 goto out_free;
1217         }
1218
1219         kvm_free_physmem_slot(&old, npages ? &new : NULL);
1220         /* Slot deletion case: we have to update the current slot */
1221         spin_lock(&kvm->mmu_lock);
1222         if (!npages)
1223                 *memslot = old;
1224         spin_unlock(&kvm->mmu_lock);
1225 #ifdef CONFIG_DMAR
1226         /* map the pages in iommu page table */
1227         r = kvm_iommu_map_pages(kvm, base_gfn, npages);
1228         if (r)
1229                 goto out;
1230 #endif
1231         return 0;
1232
1233 out_free:
1234         kvm_free_physmem_slot(&new, &old);
1235 out:
1236         return r;
1237
1238 }
1239 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
1240
1241 int kvm_set_memory_region(struct kvm *kvm,
1242                           struct kvm_userspace_memory_region *mem,
1243                           int user_alloc)
1244 {
1245         int r;
1246
1247         down_write(&kvm->slots_lock);
1248         r = __kvm_set_memory_region(kvm, mem, user_alloc);
1249         up_write(&kvm->slots_lock);
1250         return r;
1251 }
1252 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
1253
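/*
 * Illustrative caller sketch, not taken from this file: registering a
 * user-backed slot amounts to filling in a kvm_userspace_memory_region
 * (memory_size, guest_phys_addr and userspace_addr all page-aligned) and
 * calling the locked wrapper above:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0,
 *		.memory_size     = size,
 *		.userspace_addr  = hva,
 *	};
 *
 *	r = kvm_set_memory_region(kvm, &mem, 1);
 */
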
1254 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
1255                                    struct
1256                                    kvm_userspace_memory_region *mem,
1257                                    int user_alloc)
1258 {
1259         if (mem->slot >= KVM_MEMORY_SLOTS)
1260                 return -EINVAL;
1261         return kvm_set_memory_region(kvm, mem, user_alloc);
1262 }
1263
1264 int kvm_get_dirty_log(struct kvm *kvm,
1265                         struct kvm_dirty_log *log, int *is_dirty)
1266 {
1267         struct kvm_memory_slot *memslot;
1268         int r, i;
1269         int n;
1270         unsigned long any = 0;
1271
1272         r = -EINVAL;
1273         if (log->slot >= KVM_MEMORY_SLOTS)
1274                 goto out;
1275
1276         memslot = &kvm->memslots[log->slot];
1277         r = -ENOENT;
1278         if (!memslot->dirty_bitmap)
1279                 goto out;
1280
1281         n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1282
1283         for (i = 0; !any && i < n/sizeof(long); ++i)
1284                 any = memslot->dirty_bitmap[i];
1285
1286         r = -EFAULT;
1287         if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
1288                 goto out;
1289
1290         if (any)
1291                 *is_dirty = 1;
1292
1293         r = 0;
1294 out:
1295         return r;
1296 }
1297
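/*
 * Illustrative usage, a sketch rather than code from this file: the
 * arch-specific KVM_GET_DIRTY_LOG ioctl handler is expected to call this
 * helper and act on the is_dirty hint, roughly:
 *
 *	int is_dirty = 0;
 *
 *	r = kvm_get_dirty_log(kvm, log, &is_dirty);
 *	if (!r && is_dirty) {
 *		... flush shadow pages and clear the dirty bitmap ...
 *	}
 */
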
1298 void kvm_disable_largepages(void)
1299 {
1300         largepages_enabled = false;
1301 }
1302 EXPORT_SYMBOL_GPL(kvm_disable_largepages);
1303
1304 int is_error_page(struct page *page)
1305 {
1306         return page == bad_page;
1307 }
1308 EXPORT_SYMBOL_GPL(is_error_page);
1309
1310 int is_error_pfn(pfn_t pfn)
1311 {
1312         return pfn == bad_pfn;
1313 }
1314 EXPORT_SYMBOL_GPL(is_error_pfn);
1315
1316 static inline unsigned long bad_hva(void)
1317 {
1318         return PAGE_OFFSET;
1319 }
1320
1321 int kvm_is_error_hva(unsigned long addr)
1322 {
1323         return addr == bad_hva();
1324 }
1325 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
1326
1327 struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
1328 {
1329         int i;
1330
1331         for (i = 0; i < kvm->nmemslots; ++i) {
1332                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
1333
1334                 if (gfn >= memslot->base_gfn
1335                     && gfn < memslot->base_gfn + memslot->npages)
1336                         return memslot;
1337         }
1338         return NULL;
1339 }
1340 EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
1341
1342 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
1343 {
1344         gfn = unalias_gfn(kvm, gfn);
1345         return gfn_to_memslot_unaliased(kvm, gfn);
1346 }
1347
1348 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
1349 {
1350         int i;
1351
1352         gfn = unalias_gfn(kvm, gfn);
1353         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1354                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
1355
1356                 if (gfn >= memslot->base_gfn
1357                     && gfn < memslot->base_gfn + memslot->npages)
1358                         return 1;
1359         }
1360         return 0;
1361 }
1362 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
1363
1364 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
1365 {
1366         struct kvm_memory_slot *slot;
1367
1368         gfn = unalias_gfn(kvm, gfn);
1369         slot = gfn_to_memslot_unaliased(kvm, gfn);
1370         if (!slot)
1371                 return bad_hva();
1372         return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
1373 }
1374 EXPORT_SYMBOL_GPL(gfn_to_hva);
1375
1376 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1377 {
1378         struct page *page[1];
1379         unsigned long addr;
1380         int npages;
1381         pfn_t pfn;
1382
1383         might_sleep();
1384
1385         addr = gfn_to_hva(kvm, gfn);
1386         if (kvm_is_error_hva(addr)) {
1387                 get_page(bad_page);
1388                 return page_to_pfn(bad_page);
1389         }
1390
1391         npages = get_user_pages_fast(addr, 1, 1, page);
1392
1393         if (unlikely(npages != 1)) {
1394                 struct vm_area_struct *vma;
1395
1396                 down_read(&current->mm->mmap_sem);
1397                 vma = find_vma(current->mm, addr);
1398
1399                 if (vma == NULL || addr < vma->vm_start ||
1400                     !(vma->vm_flags & VM_PFNMAP)) {
1401                         up_read(&current->mm->mmap_sem);
1402                         get_page(bad_page);
1403                         return page_to_pfn(bad_page);
1404                 }
1405
1406                 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1407                 up_read(&current->mm->mmap_sem);
1408                 BUG_ON(!kvm_is_mmio_pfn(pfn));
1409         } else
1410                 pfn = page_to_pfn(page[0]);
1411
1412         return pfn;
1413 }
1414
1415 EXPORT_SYMBOL_GPL(gfn_to_pfn);
1416
1417 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1418 {
1419         pfn_t pfn;
1420
1421         pfn = gfn_to_pfn(kvm, gfn);
1422         if (!kvm_is_mmio_pfn(pfn))
1423                 return pfn_to_page(pfn);
1424
1425         WARN_ON(kvm_is_mmio_pfn(pfn));
1426
1427         get_page(bad_page);
1428         return bad_page;
1429 }
1430
1431 EXPORT_SYMBOL_GPL(gfn_to_page);
1432
1433 void kvm_release_page_clean(struct page *page)
1434 {
1435         kvm_release_pfn_clean(page_to_pfn(page));
1436 }
1437 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1438
1439 void kvm_release_pfn_clean(pfn_t pfn)
1440 {
1441         if (!kvm_is_mmio_pfn(pfn))
1442                 put_page(pfn_to_page(pfn));
1443 }
1444 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
1445
1446 void kvm_release_page_dirty(struct page *page)
1447 {
1448         kvm_release_pfn_dirty(page_to_pfn(page));
1449 }
1450 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
1451
1452 void kvm_release_pfn_dirty(pfn_t pfn)
1453 {
1454         kvm_set_pfn_dirty(pfn);
1455         kvm_release_pfn_clean(pfn);
1456 }
1457 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
1458
1459 void kvm_set_page_dirty(struct page *page)
1460 {
1461         kvm_set_pfn_dirty(page_to_pfn(page));
1462 }
1463 EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
1464
1465 void kvm_set_pfn_dirty(pfn_t pfn)
1466 {
1467         if (!kvm_is_mmio_pfn(pfn)) {
1468                 struct page *page = pfn_to_page(pfn);
1469                 if (!PageReserved(page))
1470                         SetPageDirty(page);
1471         }
1472 }
1473 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
1474
1475 void kvm_set_pfn_accessed(pfn_t pfn)
1476 {
1477         if (!kvm_is_mmio_pfn(pfn))
1478                 mark_page_accessed(pfn_to_page(pfn));
1479 }
1480 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
1481
1482 void kvm_get_pfn(pfn_t pfn)
1483 {
1484         if (!kvm_is_mmio_pfn(pfn))
1485                 get_page(pfn_to_page(pfn));
1486 }
1487 EXPORT_SYMBOL_GPL(kvm_get_pfn);
1488
1489 static int next_segment(unsigned long len, int offset)
1490 {
1491         if (len > PAGE_SIZE - offset)
1492                 return PAGE_SIZE - offset;
1493         else
1494                 return len;
1495 }
1496
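/*
 * Worked example for next_segment(): with a PAGE_SIZE of 4096, a 6000-byte
 * guest access starting at page offset 2048 is split into a 2048-byte chunk
 * (the rest of the first page) followed by a 3952-byte chunk on the next
 * page, which is exactly how the loops in kvm_read_guest() and
 * kvm_write_guest() below walk a gpa range.
 */
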
1497 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1498                         int len)
1499 {
1500         int r;
1501         unsigned long addr;
1502
1503         addr = gfn_to_hva(kvm, gfn);
1504         if (kvm_is_error_hva(addr))
1505                 return -EFAULT;
1506         r = copy_from_user(data, (void __user *)addr + offset, len);
1507         if (r)
1508                 return -EFAULT;
1509         return 0;
1510 }
1511 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
1512
1513 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
1514 {
1515         gfn_t gfn = gpa >> PAGE_SHIFT;
1516         int seg;
1517         int offset = offset_in_page(gpa);
1518         int ret;
1519
1520         while ((seg = next_segment(len, offset)) != 0) {
1521                 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
1522                 if (ret < 0)
1523                         return ret;
1524                 offset = 0;
1525                 len -= seg;
1526                 data += seg;
1527                 ++gfn;
1528         }
1529         return 0;
1530 }
1531 EXPORT_SYMBOL_GPL(kvm_read_guest);
1532
1533 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
1534                           unsigned long len)
1535 {
1536         int r;
1537         unsigned long addr;
1538         gfn_t gfn = gpa >> PAGE_SHIFT;
1539         int offset = offset_in_page(gpa);
1540
1541         addr = gfn_to_hva(kvm, gfn);
1542         if (kvm_is_error_hva(addr))
1543                 return -EFAULT;
1544         pagefault_disable();
1545         r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
1546         pagefault_enable();
1547         if (r)
1548                 return -EFAULT;
1549         return 0;
1550 }
1551 EXPORT_SYMBOL(kvm_read_guest_atomic);
1552
1553 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1554                          int offset, int len)
1555 {
1556         int r;
1557         unsigned long addr;
1558
1559         addr = gfn_to_hva(kvm, gfn);
1560         if (kvm_is_error_hva(addr))
1561                 return -EFAULT;
1562         r = copy_to_user((void __user *)addr + offset, data, len);
1563         if (r)
1564                 return -EFAULT;
1565         mark_page_dirty(kvm, gfn);
1566         return 0;
1567 }
1568 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
1569
1570 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1571                     unsigned long len)
1572 {
1573         gfn_t gfn = gpa >> PAGE_SHIFT;
1574         int seg;
1575         int offset = offset_in_page(gpa);
1576         int ret;
1577
1578         while ((seg = next_segment(len, offset)) != 0) {
1579                 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
1580                 if (ret < 0)
1581                         return ret;
1582                 offset = 0;
1583                 len -= seg;
1584                 data += seg;
1585                 ++gfn;
1586         }
1587         return 0;
1588 }
1589
1590 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
1591 {
1592         return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
1593 }
1594 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
1595
1596 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
1597 {
1598         gfn_t gfn = gpa >> PAGE_SHIFT;
1599         int seg;
1600         int offset = offset_in_page(gpa);
1601         int ret;
1602
1603         while ((seg = next_segment(len, offset)) != 0) {
1604                 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
1605                 if (ret < 0)
1606                         return ret;
1607                 offset = 0;
1608                 len -= seg;
1609                 ++gfn;
1610         }
1611         return 0;
1612 }
1613 EXPORT_SYMBOL_GPL(kvm_clear_guest);
1614
1615 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
1616 {
1617         struct kvm_memory_slot *memslot;
1618
1619         gfn = unalias_gfn(kvm, gfn);
1620         memslot = gfn_to_memslot_unaliased(kvm, gfn);
1621         if (memslot && memslot->dirty_bitmap) {
1622                 unsigned long rel_gfn = gfn - memslot->base_gfn;
1623
1624                 /* avoid RMW */
1625                 if (!test_bit(rel_gfn, memslot->dirty_bitmap))
1626                         set_bit(rel_gfn, memslot->dirty_bitmap);
1627         }
1628 }
1629
1630 /*
1631  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
1632  */
1633 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
1634 {
1635         DEFINE_WAIT(wait);
1636
1637         for (;;) {
1638                 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
1639
1640                 if ((kvm_arch_interrupt_allowed(vcpu) &&
1641                                         kvm_cpu_has_interrupt(vcpu)) ||
1642                                 kvm_arch_vcpu_runnable(vcpu)) {
1643                         set_bit(KVM_REQ_UNHALT, &vcpu->requests);
1644                         break;
1645                 }
1646                 if (kvm_cpu_has_pending_timer(vcpu))
1647                         break;
1648                 if (signal_pending(current))
1649                         break;
1650
1651                 vcpu_put(vcpu);
1652                 schedule();
1653                 vcpu_load(vcpu);
1654         }
1655
1656         finish_wait(&vcpu->wq, &wait);
1657 }
1658
1659 void kvm_resched(struct kvm_vcpu *vcpu)
1660 {
1661         if (!need_resched())
1662                 return;
1663         cond_resched();
1664 }
1665 EXPORT_SYMBOL_GPL(kvm_resched);
1666
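/*
 * Fault handler for mmap() of a vcpu fd.  Offset 0 maps the shared
 * struct kvm_run; the optional pages that follow map the x86 PIO data page
 * and the coalesced MMIO ring.  Illustrative userspace use (names are
 * hypothetical):
 *
 *	size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	run  = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    vcpu_fd, 0);
 */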
1667 static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1668 {
1669         struct kvm_vcpu *vcpu = vma->vm_file->private_data;
1670         struct page *page;
1671
1672         if (vmf->pgoff == 0)
1673                 page = virt_to_page(vcpu->run);
1674 #ifdef CONFIG_X86
1675         else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
1676                 page = virt_to_page(vcpu->arch.pio_data);
1677 #endif
1678 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1679         else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
1680                 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
1681 #endif
1682         else
1683                 return VM_FAULT_SIGBUS;
1684         get_page(page);
1685         vmf->page = page;
1686         return 0;
1687 }
1688
1689 static struct vm_operations_struct kvm_vcpu_vm_ops = {
1690         .fault = kvm_vcpu_fault,
1691 };
1692
1693 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
1694 {
1695         vma->vm_ops = &kvm_vcpu_vm_ops;
1696         return 0;
1697 }
1698
1699 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
1700 {
1701         struct kvm_vcpu *vcpu = filp->private_data;
1702
1703         kvm_put_kvm(vcpu->kvm);
1704         return 0;
1705 }
1706
1707 static struct file_operations kvm_vcpu_fops = {
1708         .release        = kvm_vcpu_release,
1709         .unlocked_ioctl = kvm_vcpu_ioctl,
1710         .compat_ioctl   = kvm_vcpu_ioctl,
1711         .mmap           = kvm_vcpu_mmap,
1712 };
1713
1714 /*
1715  * Allocates an inode for the vcpu.
1716  */
1717 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1718 {
1719         return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
1720 }
1721
1722 /*
1723  * Creates some virtual cpus.  Good luck creating more than one.
1724  */
1725 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
1726 {
1727         int r;
1728         struct kvm_vcpu *vcpu, *v;
1729
1730         vcpu = kvm_arch_vcpu_create(kvm, id);
1731         if (IS_ERR(vcpu))
1732                 return PTR_ERR(vcpu);
1733
1734         preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
1735
1736         r = kvm_arch_vcpu_setup(vcpu);
1737         if (r)
1738                 return r;
1739
1740         mutex_lock(&kvm->lock);
1741         if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
1742                 r = -EINVAL;
1743                 goto vcpu_destroy;
1744         }
1745
1746         kvm_for_each_vcpu(r, v, kvm)
1747                 if (v->vcpu_id == id) {
1748                         r = -EEXIST;
1749                         goto vcpu_destroy;
1750                 }
1751
1752         BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
1753
1754         /* Now it's all set up, let userspace reach it */
1755         kvm_get_kvm(kvm);
1756         r = create_vcpu_fd(vcpu);
1757         if (r < 0) {
1758                 kvm_put_kvm(kvm);
1759                 goto vcpu_destroy;
1760         }
1761
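        /*
         * Publish the vcpu: the array slot must be visible before the
         * online_vcpus count is raised, hence the smp_wmb() between the
         * store and the increment (pairing with the reader side in
         * kvm_get_vcpu()).
         */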
1762         kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
1763         smp_wmb();
1764         atomic_inc(&kvm->online_vcpus);
1765
1766 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
1767         if (kvm->bsp_vcpu_id == id)
1768                 kvm->bsp_vcpu = vcpu;
1769 #endif
1770         mutex_unlock(&kvm->lock);
1771         return r;
1772
1773 vcpu_destroy:
1774         mutex_unlock(&kvm->lock);
1775         kvm_arch_vcpu_destroy(vcpu);
1776         return r;
1777 }
1778
1779 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
1780 {
1781         if (sigset) {
1782                 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1783                 vcpu->sigset_active = 1;
1784                 vcpu->sigset = *sigset;
1785         } else
1786                 vcpu->sigset_active = 0;
1787         return 0;
1788 }
1789
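/*
 * MSI-X setup for an assigned device is a two-step protocol: userspace
 * first fixes the number of entries with KVM_ASSIGN_SET_MSIX_NR (which
 * allocates the host and guest entry arrays below), then programs each
 * entry/GSI pair with KVM_ASSIGN_SET_MSIX_ENTRY.
 */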
1790 #ifdef __KVM_HAVE_MSIX
1791 static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
1792                                     struct kvm_assigned_msix_nr *entry_nr)
1793 {
1794         int r = 0;
1795         struct kvm_assigned_dev_kernel *adev;
1796
1797         mutex_lock(&kvm->lock);
1798
1799         adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
1800                                       entry_nr->assigned_dev_id);
1801         if (!adev) {
1802                 r = -EINVAL;
1803                 goto msix_nr_out;
1804         }
1805
1806         if (adev->entries_nr == 0) {
1807                 adev->entries_nr = entry_nr->entry_nr;
1808                 if (adev->entries_nr == 0 ||
1809                     adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
1810                         r = -EINVAL;
1811                         goto msix_nr_out;
1812                 }
1813
1814                 adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
1815                                                 entry_nr->entry_nr,
1816                                                 GFP_KERNEL);
1817                 if (!adev->host_msix_entries) {
1818                         r = -ENOMEM;
1819                         goto msix_nr_out;
1820                 }
1821                 adev->guest_msix_entries = kzalloc(
1822                                 sizeof(struct kvm_guest_msix_entry) *
1823                                 entry_nr->entry_nr, GFP_KERNEL);
1824                 if (!adev->guest_msix_entries) {
1825                         kfree(adev->host_msix_entries);
1826                         r = -ENOMEM;
1827                         goto msix_nr_out;
1828                 }
1829         } else /* the MSI-X entry count may only be set once */
1830                 r = -EINVAL;
1831 msix_nr_out:
1832         mutex_unlock(&kvm->lock);
1833         return r;
1834 }
1835
1836 static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
1837                                        struct kvm_assigned_msix_entry *entry)
1838 {
1839         int r = 0, i;
1840         struct kvm_assigned_dev_kernel *adev;
1841
1842         mutex_lock(&kvm->lock);
1843
1844         adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
1845                                       entry->assigned_dev_id);
1846
1847         if (!adev) {
1848                 r = -EINVAL;
1849                 goto msix_entry_out;
1850         }
1851
1852         for (i = 0; i < adev->entries_nr; i++)
1853                 if (adev->guest_msix_entries[i].vector == 0 ||
1854                     adev->guest_msix_entries[i].entry == entry->entry) {
1855                         adev->guest_msix_entries[i].entry = entry->entry;
1856                         adev->guest_msix_entries[i].vector = entry->gsi;
1857                         adev->host_msix_entries[i].entry = entry->entry;
1858                         break;
1859                 }
1860         if (i == adev->entries_nr) {
1861                 r = -ENOSPC;
1862                 goto msix_entry_out;
1863         }
1864
1865 msix_entry_out:
1866         mutex_unlock(&kvm->lock);
1867
1868         return r;
1869 }
1870 #endif
1871
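/*
 * ioctl dispatch for a vcpu fd.  Register-sized state (kvm_regs, kvm_sregs,
 * kvm_fpu) is bounced through temporary kernel buffers and the arch code
 * does the real work.  Illustrative userspace use (entry_point and the fds
 * are hypothetical):
 *
 *	struct kvm_regs regs;
 *	ioctl(vcpu_fd, KVM_GET_REGS, &regs);
 *	regs.rip = entry_point;			(x86 register layout)
 *	ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 */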
1872 static long kvm_vcpu_ioctl(struct file *filp,
1873                            unsigned int ioctl, unsigned long arg)
1874 {
1875         struct kvm_vcpu *vcpu = filp->private_data;
1876         void __user *argp = (void __user *)arg;
1877         int r;
1878         struct kvm_fpu *fpu = NULL;
1879         struct kvm_sregs *kvm_sregs = NULL;
1880
1881         if (vcpu->kvm->mm != current->mm)
1882                 return -EIO;
1883         switch (ioctl) {
1884         case KVM_RUN:
1885                 r = -EINVAL;
1886                 if (arg)
1887                         goto out;
1888                 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
1889                 break;
1890         case KVM_GET_REGS: {
1891                 struct kvm_regs *kvm_regs;
1892
1893                 r = -ENOMEM;
1894                 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1895                 if (!kvm_regs)
1896                         goto out;
1897                 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
1898                 if (r)
1899                         goto out_free1;
1900                 r = -EFAULT;
1901                 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
1902                         goto out_free1;
1903                 r = 0;
1904 out_free1:
1905                 kfree(kvm_regs);
1906                 break;
1907         }
1908         case KVM_SET_REGS: {
1909                 struct kvm_regs *kvm_regs;
1910
1911                 r = -ENOMEM;
1912                 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1913                 if (!kvm_regs)
1914                         goto out;
1915                 r = -EFAULT;
1916                 if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
1917                         goto out_free2;
1918                 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
1919                 if (r)
1920                         goto out_free2;
1921                 r = 0;
1922 out_free2:
1923                 kfree(kvm_regs);
1924                 break;
1925         }
1926         case KVM_GET_SREGS: {
1927                 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1928                 r = -ENOMEM;
1929                 if (!kvm_sregs)
1930                         goto out;
1931                 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
1932                 if (r)
1933                         goto out;
1934                 r = -EFAULT;
1935                 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
1936                         goto out;
1937                 r = 0;
1938                 break;
1939         }
1940         case KVM_SET_SREGS: {
1941                 kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1942                 r = -ENOMEM;
1943                 if (!kvm_sregs)
1944                         goto out;
1945                 r = -EFAULT;
1946                 if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
1947                         goto out;
1948                 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
1949                 if (r)
1950                         goto out;
1951                 r = 0;
1952                 break;
1953         }
1954         case KVM_GET_MP_STATE: {
1955                 struct kvm_mp_state mp_state;
1956
1957                 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
1958                 if (r)
1959                         goto out;
1960                 r = -EFAULT;
1961                 if (copy_to_user(argp, &mp_state, sizeof mp_state))
1962                         goto out;
1963                 r = 0;
1964                 break;
1965         }
1966         case KVM_SET_MP_STATE: {
1967                 struct kvm_mp_state mp_state;
1968
1969                 r = -EFAULT;
1970                 if (copy_from_user(&mp_state, argp, sizeof mp_state))
1971                         goto out;
1972                 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
1973                 if (r)
1974                         goto out;
1975                 r = 0;
1976                 break;
1977         }
1978         case KVM_TRANSLATE: {
1979                 struct kvm_translation tr;
1980
1981                 r = -EFAULT;
1982                 if (copy_from_user(&tr, argp, sizeof tr))
1983                         goto out;
1984                 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
1985                 if (r)
1986                         goto out;
1987                 r = -EFAULT;
1988                 if (copy_to_user(argp, &tr, sizeof tr))
1989                         goto out;
1990                 r = 0;
1991                 break;
1992         }
1993         case KVM_SET_GUEST_DEBUG: {
1994                 struct kvm_guest_debug dbg;
1995
1996                 r = -EFAULT;
1997                 if (copy_from_user(&dbg, argp, sizeof dbg))
1998                         goto out;
1999                 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
2000                 if (r)
2001                         goto out;
2002                 r = 0;
2003                 break;
2004         }
2005         case KVM_SET_SIGNAL_MASK: {
2006                 struct kvm_signal_mask __user *sigmask_arg = argp;
2007                 struct kvm_signal_mask kvm_sigmask;
2008                 sigset_t sigset, *p;
2009
2010                 p = NULL;
2011                 if (argp) {
2012                         r = -EFAULT;
2013                         if (copy_from_user(&kvm_sigmask, argp,
2014                                            sizeof kvm_sigmask))
2015                                 goto out;
2016                         r = -EINVAL;
2017                         if (kvm_sigmask.len != sizeof sigset)
2018                                 goto out;
2019                         r = -EFAULT;
2020                         if (copy_from_user(&sigset, sigmask_arg->sigset,
2021                                            sizeof sigset))
2022                                 goto out;
2023                         p = &sigset;
2024                 }
2025                 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
2026                 break;
2027         }
2028         case KVM_GET_FPU: {
2029                 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
2030                 r = -ENOMEM;
2031                 if (!fpu)
2032                         goto out;
2033                 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
2034                 if (r)
2035                         goto out;
2036                 r = -EFAULT;
2037                 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
2038                         goto out;
2039                 r = 0;
2040                 break;
2041         }
2042         case KVM_SET_FPU: {
2043                 fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
2044                 r = -ENOMEM;
2045                 if (!fpu)
2046                         goto out;
2047                 r = -EFAULT;
2048                 if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
2049                         goto out;
2050                 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
2051                 if (r)
2052                         goto out;
2053                 r = 0;
2054                 break;
2055         }
2056         default:
2057                 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
2058         }
2059 out:
2060         kfree(fpu);
2061         kfree(kvm_sregs);
2062         return r;
2063 }
2064
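/*
 * ioctl dispatch for a VM fd.  The central call is
 * KVM_SET_USER_MEMORY_REGION, which maps a range of the caller's address
 * space into guest physical memory.  Illustrative sketch (ram_ptr and
 * ram_size are hypothetical):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = ram_size,
 *		.userspace_addr  = (__u64)ram_ptr,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */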
2065 static long kvm_vm_ioctl(struct file *filp,
2066                            unsigned int ioctl, unsigned long arg)
2067 {
2068         struct kvm *kvm = filp->private_data;
2069         void __user *argp = (void __user *)arg;
2070         int r;
2071
2072         if (kvm->mm != current->mm)
2073                 return -EIO;
2074         switch (ioctl) {
2075         case KVM_CREATE_VCPU:
2076                 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
2077                 if (r < 0)
2078                         goto out;
2079                 break;
2080         case KVM_SET_USER_MEMORY_REGION: {
2081                 struct kvm_userspace_memory_region kvm_userspace_mem;
2082
2083                 r = -EFAULT;
2084                 if (copy_from_user(&kvm_userspace_mem, argp,
2085                                                 sizeof kvm_userspace_mem))
2086                         goto out;
2087
2088                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
2089                 if (r)
2090                         goto out;
2091                 break;
2092         }
2093         case KVM_GET_DIRTY_LOG: {
2094                 struct kvm_dirty_log log;
2095
2096                 r = -EFAULT;
2097                 if (copy_from_user(&log, argp, sizeof log))
2098                         goto out;
2099                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
2100                 if (r)
2101                         goto out;
2102                 break;
2103         }
2104 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2105         case KVM_REGISTER_COALESCED_MMIO: {
2106                 struct kvm_coalesced_mmio_zone zone;
2107                 r = -EFAULT;
2108                 if (copy_from_user(&zone, argp, sizeof zone))
2109                         goto out;
2110                 r = -ENXIO;
2111                 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
2112                 if (r)
2113                         goto out;
2114                 r = 0;
2115                 break;
2116         }
2117         case KVM_UNREGISTER_COALESCED_MMIO: {
2118                 struct kvm_coalesced_mmio_zone zone;
2119                 r = -EFAULT;
2120                 if (copy_from_user(&zone, argp, sizeof zone))
2121                         goto out;
2122                 r = -ENXIO;
2123                 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
2124                 if (r)
2125                         goto out;
2126                 r = 0;
2127                 break;
2128         }
2129 #endif
2130 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
2131         case KVM_ASSIGN_PCI_DEVICE: {
2132                 struct kvm_assigned_pci_dev assigned_dev;
2133
2134                 r = -EFAULT;
2135                 if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
2136                         goto out;
2137                 r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
2138                 if (r)
2139                         goto out;
2140                 break;
2141         }
2142         case KVM_ASSIGN_IRQ: {
2143                 r = -EOPNOTSUPP;
2144                 break;
2145         }
2146 #ifdef KVM_CAP_ASSIGN_DEV_IRQ
2147         case KVM_ASSIGN_DEV_IRQ: {
2148                 struct kvm_assigned_irq assigned_irq;
2149
2150                 r = -EFAULT;
2151                 if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
2152                         goto out;
2153                 r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
2154                 if (r)
2155                         goto out;
2156                 break;
2157         }
2158         case KVM_DEASSIGN_DEV_IRQ: {
2159                 struct kvm_assigned_irq assigned_irq;
2160
2161                 r = -EFAULT;
2162                 if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
2163                         goto out;
2164                 r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
2165                 if (r)
2166                         goto out;
2167                 break;
2168         }
2169 #endif
2170 #endif
2171 #ifdef KVM_CAP_DEVICE_DEASSIGNMENT
2172         case KVM_DEASSIGN_PCI_DEVICE: {
2173                 struct kvm_assigned_pci_dev assigned_dev;
2174
2175                 r = -EFAULT;
2176                 if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
2177                         goto out;
2178                 r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
2179                 if (r)
2180                         goto out;
2181                 break;
2182         }
2183 #endif
2184 #ifdef KVM_CAP_IRQ_ROUTING
2185         case KVM_SET_GSI_ROUTING: {
2186                 struct kvm_irq_routing routing;
2187                 struct kvm_irq_routing __user *urouting;
2188                 struct kvm_irq_routing_entry *entries;
2189
2190                 r = -EFAULT;
2191                 if (copy_from_user(&routing, argp, sizeof(routing)))
2192                         goto out;
2193                 r = -EINVAL;
2194                 if (routing.nr >= KVM_MAX_IRQ_ROUTES)
2195                         goto out;
2196                 if (routing.flags)
2197                         goto out;
2198                 r = -ENOMEM;
2199                 entries = vmalloc(routing.nr * sizeof(*entries));
2200                 if (!entries)
2201                         goto out;
2202                 r = -EFAULT;
2203                 urouting = argp;
2204                 if (copy_from_user(entries, urouting->entries,
2205                                    routing.nr * sizeof(*entries)))
2206                         goto out_free_irq_routing;
2207                 r = kvm_set_irq_routing(kvm, entries, routing.nr,
2208                                         routing.flags);
2209         out_free_irq_routing:
2210                 vfree(entries);
2211                 break;
2212         }
2213 #ifdef __KVM_HAVE_MSIX
2214         case KVM_ASSIGN_SET_MSIX_NR: {
2215                 struct kvm_assigned_msix_nr entry_nr;
2216                 r = -EFAULT;
2217                 if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
2218                         goto out;
2219                 r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
2220                 if (r)
2221                         goto out;
2222                 break;
2223         }
2224         case KVM_ASSIGN_SET_MSIX_ENTRY: {
2225                 struct kvm_assigned_msix_entry entry;
2226                 r = -EFAULT;
2227                 if (copy_from_user(&entry, argp, sizeof entry))
2228                         goto out;
2229                 r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
2230                 if (r)
2231                         goto out;
2232                 break;
2233         }
2234 #endif
2235 #endif /* KVM_CAP_IRQ_ROUTING */
2236         case KVM_IRQFD: {
2237                 struct kvm_irqfd data;
2238
2239                 r = -EFAULT;
2240                 if (copy_from_user(&data, argp, sizeof data))
2241                         goto out;
2242                 r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
2243                 break;
2244         }
2245 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
2246         case KVM_SET_BOOT_CPU_ID:
2247                 r = 0;
2248                 if (atomic_read(&kvm->online_vcpus) != 0)
2249                         r = -EBUSY;
2250                 else
2251                         kvm->bsp_vcpu_id = arg;
2252                 break;
2253 #endif
2254         default:
2255                 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
2256         }
2257 out:
2258         return r;
2259 }
2260
2261 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2262 {
2263         struct page *page[1];
2264         unsigned long addr;
2265         int npages;
2266         gfn_t gfn = vmf->pgoff;
2267         struct kvm *kvm = vma->vm_file->private_data;
2268
2269         addr = gfn_to_hva(kvm, gfn);
2270         if (kvm_is_error_hva(addr))
2271                 return VM_FAULT_SIGBUS;
2272
2273         npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
2274                                 NULL);
2275         if (unlikely(npages != 1))
2276                 return VM_FAULT_SIGBUS;
2277
2278         vmf->page = page[0];
2279         return 0;
2280 }
2281
2282 static struct vm_operations_struct kvm_vm_vm_ops = {
2283         .fault = kvm_vm_fault,
2284 };
2285
2286 static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
2287 {
2288         vma->vm_ops = &kvm_vm_vm_ops;
2289         return 0;
2290 }
2291
2292 static struct file_operations kvm_vm_fops = {
2293         .release        = kvm_vm_release,
2294         .unlocked_ioctl = kvm_vm_ioctl,
2295         .compat_ioctl   = kvm_vm_ioctl,
2296         .mmap           = kvm_vm_mmap,
2297 };
2298
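/*
 * KVM_CREATE_VM: create a VM and return an anonymous-inode fd backed by
 * kvm_vm_fops.  The fd keeps the VM reference alive; it is dropped via
 * kvm_vm_release() when the fd is closed, or immediately here if the fd
 * allocation fails.
 */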
2299 static int kvm_dev_ioctl_create_vm(void)
2300 {
2301         int fd;
2302         struct kvm *kvm;
2303
2304         kvm = kvm_create_vm();
2305         if (IS_ERR(kvm))
2306                 return PTR_ERR(kvm);
2307         fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
2308         if (fd < 0)
2309                 kvm_put_kvm(kvm);
2310
2311         return fd;
2312 }
2313
2314 static long kvm_dev_ioctl_check_extension_generic(long arg)
2315 {
2316         switch (arg) {
2317         case KVM_CAP_USER_MEMORY:
2318         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
2319         case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
2320 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
2321         case KVM_CAP_SET_BOOT_CPU_ID:
2322 #endif
2323                 return 1;
2324 #ifdef CONFIG_HAVE_KVM_IRQCHIP
2325         case KVM_CAP_IRQ_ROUTING:
2326                 return KVM_MAX_IRQ_ROUTES;
2327 #endif
2328         default:
2329                 break;
2330         }
2331         return kvm_dev_ioctl_check_extension(arg);
2332 }
2333
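/*
 * ioctl dispatch for /dev/kvm itself.  A minimal, illustrative bring-up
 * sequence from userspace (error handling omitted, names hypothetical):
 *
 *	kvm_fd  = open("/dev/kvm", O_RDWR);
 *	ioctl(kvm_fd, KVM_GET_API_VERSION, 0);		(must match KVM_API_VERSION)
 *	vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	size    = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	run     = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       vcpu_fd, 0);
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */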
2334 static long kvm_dev_ioctl(struct file *filp,
2335                           unsigned int ioctl, unsigned long arg)
2336 {
2337         long r = -EINVAL;
2338
2339         switch (ioctl) {
2340         case KVM_GET_API_VERSION:
2341                 r = -EINVAL;
2342                 if (arg)
2343                         goto out;
2344                 r = KVM_API_VERSION;
2345                 break;
2346         case KVM_CREATE_VM:
2347                 r = -EINVAL;
2348                 if (arg)
2349                         goto out;
2350                 r = kvm_dev_ioctl_create_vm();
2351                 break;
2352         case KVM_CHECK_EXTENSION:
2353                 r = kvm_dev_ioctl_check_extension_generic(arg);
2354                 break;
2355         case KVM_GET_VCPU_MMAP_SIZE:
2356                 r = -EINVAL;
2357                 if (arg)
2358                         goto out;
2359                 r = PAGE_SIZE;     /* struct kvm_run */
2360 #ifdef CONFIG_X86
2361                 r += PAGE_SIZE;    /* pio data page */
2362 #endif
2363 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2364                 r += PAGE_SIZE;    /* coalesced mmio ring page */
2365 #endif
2366                 break;
2367         case KVM_TRACE_ENABLE:
2368         case KVM_TRACE_PAUSE:
2369         case KVM_TRACE_DISABLE:
2370                 r = kvm_trace_ioctl(ioctl, arg);
2371                 break;
2372         default:
2373                 return kvm_arch_dev_ioctl(filp, ioctl, arg);
2374         }
2375 out:
2376         return r;
2377 }
2378
2379 static struct file_operations kvm_chardev_ops = {
2380         .unlocked_ioctl = kvm_dev_ioctl,
2381         .compat_ioctl   = kvm_dev_ioctl,
2382 };
2383
2384 static struct miscdevice kvm_dev = {
2385         KVM_MINOR,
2386         "kvm",
2387         &kvm_chardev_ops,
2388 };
2389
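/*
 * Per-cpu enable/disable of the hardware virtualization extensions
 * (e.g. VMXON/VMXOFF on Intel).  cpus_hardware_enabled records which CPUs
 * are already enabled so the hotplug, suspend/resume and reboot paths
 * toggle each CPU exactly once.
 */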
2390 static void hardware_enable(void *junk)
2391 {
2392         int cpu = raw_smp_processor_id();
2393
2394         if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
2395                 return;
2396         cpumask_set_cpu(cpu, cpus_hardware_enabled);
2397         kvm_arch_hardware_enable(NULL);
2398 }
2399
2400 static void hardware_disable(void *junk)
2401 {
2402         int cpu = raw_smp_processor_id();
2403
2404         if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
2405                 return;
2406         cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2407         kvm_arch_hardware_disable(NULL);
2408 }
2409
2410 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2411                            void *v)
2412 {
2413         int cpu = (long)v;
2414
2415         val &= ~CPU_TASKS_FROZEN;
2416         switch (val) {
2417         case CPU_DYING:
2418                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2419                        cpu);
2420                 hardware_disable(NULL);
2421                 break;
2422         case CPU_UP_CANCELED:
2423                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2424                        cpu);
2425                 smp_call_function_single(cpu, hardware_disable, NULL, 1);
2426                 break;
2427         case CPU_ONLINE:
2428                 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2429                        cpu);
2430                 smp_call_function_single(cpu, hardware_enable, NULL, 1);
2431                 break;
2432         }
2433         return NOTIFY_OK;
2434 }
2435
2436
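/*
 * Reached through the exception fixup attached to hardware virtualization
 * instructions by the arch code: once kvm_rebooting is set and the
 * extensions are turned off, those instructions fault and we simply spin
 * until the machine resets.
 */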
2437 asmlinkage void kvm_handle_fault_on_reboot(void)
2438 {
2439         if (kvm_rebooting)
2440                 /* spin while reset goes on */
2441                 while (true)
2442                         ;
2443         /* Fault while not rebooting.  We want the trace. */
2444         BUG();
2445 }
2446 EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
2447
2448 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
2449                       void *v)
2450 {
2451         /*
2452          * Some (well, at least mine) BIOSes hang on reboot if
2453          * in VMX root mode.
2454          *
2455          * Intel TXT also requires VMX to be off on all CPUs at shutdown.
2456          */
2457         printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2458         kvm_rebooting = true;
2459         on_each_cpu(hardware_disable, NULL, 1);
2460         return NOTIFY_OK;
2461 }
2462
2463 static struct notifier_block kvm_reboot_notifier = {
2464         .notifier_call = kvm_reboot,
2465         .priority = 0,
2466 };
2467
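/*
 * kvm_io_bus: a small fixed-size table of in-kernel MMIO/PIO devices
 * (e.g. the coalesced MMIO ring, or the PIC/PIT on x86).  Lookup is a
 * linear scan asking each device whether it claims the
 * (addr, len, is_write) access.
 */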
2468 void kvm_io_bus_init(struct kvm_io_bus *bus)
2469 {
2470         memset(bus, 0, sizeof(*bus));
2471 }
2472
2473 void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2474 {
2475         int i;
2476
2477         for (i = 0; i < bus->dev_count; i++) {
2478                 struct kvm_io_device *pos = bus->devs[i];
2479
2480                 kvm_iodevice_destructor(pos);
2481         }
2482 }
2483
2484 struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
2485                                           gpa_t addr, int len, int is_write)
2486 {
2487         int i;
2488
2489         for (i = 0; i < bus->dev_count; i++) {
2490                 struct kvm_io_device *pos = bus->devs[i];
2491
2492                 if (kvm_iodevice_in_range(pos, addr, len, is_write))
2493                         return pos;
2494         }
2495
2496         return NULL;
2497 }
2498
2499 void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
2500 {
2501         BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
2502
2503         bus->devs[bus->dev_count++] = dev;
2504 }
2505
2506 static struct notifier_block kvm_cpu_notifier = {
2507         .notifier_call = kvm_cpu_hotplug,
2508         .priority = 20, /* must be > scheduler priority */
2509 };
2510
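/*
 * debugfs statistics.  Each debugfs_entries[] item names a counter and its
 * offset inside struct kvm or struct kvm_vcpu; the read handlers below sum
 * that counter over every VM (and every vcpu) on vm_list.
 */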
2511 static int vm_stat_get(void *_offset, u64 *val)
2512 {
2513         unsigned offset = (long)_offset;
2514         struct kvm *kvm;
2515
2516         *val = 0;
2517         spin_lock(&kvm_lock);
2518         list_for_each_entry(kvm, &vm_list, vm_list)
2519                 *val += *(u32 *)((void *)kvm + offset);
2520         spin_unlock(&kvm_lock);
2521         return 0;
2522 }
2523
2524 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
2525
2526 static int vcpu_stat_get(void *_offset, u64 *val)
2527 {
2528         unsigned offset = (long)_offset;
2529         struct kvm *kvm;
2530         struct kvm_vcpu *vcpu;
2531         int i;
2532
2533         *val = 0;
2534         spin_lock(&kvm_lock);
2535         list_for_each_entry(kvm, &vm_list, vm_list)
2536                 kvm_for_each_vcpu(i, vcpu, kvm)
2537                         *val += *(u32 *)((void *)vcpu + offset);
2538
2539         spin_unlock(&kvm_lock);
2540         return 0;
2541 }
2542
2543 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
2544
2545 static struct file_operations *stat_fops[] = {
2546         [KVM_STAT_VCPU] = &vcpu_stat_fops,
2547         [KVM_STAT_VM]   = &vm_stat_fops,
2548 };
2549
2550 static void kvm_init_debug(void)
2551 {
2552         struct kvm_stats_debugfs_item *p;
2553
2554         kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
2555         for (p = debugfs_entries; p->name; ++p)
2556                 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
2557                                                 (void *)(long)p->offset,
2558                                                 stat_fops[p->kind]);
2559 }
2560
2561 static void kvm_exit_debug(void)
2562 {
2563         struct kvm_stats_debugfs_item *p;
2564
2565         for (p = debugfs_entries; p->name; ++p)
2566                 debugfs_remove(p->dentry);
2567         debugfs_remove(kvm_debugfs_dir);
2568 }
2569
2570 static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2571 {
2572         hardware_disable(NULL);
2573         return 0;
2574 }
2575
2576 static int kvm_resume(struct sys_device *dev)
2577 {
2578         hardware_enable(NULL);
2579         return 0;
2580 }
2581
2582 static struct sysdev_class kvm_sysdev_class = {
2583         .name = "kvm",
2584         .suspend = kvm_suspend,
2585         .resume = kvm_resume,
2586 };
2587
2588 static struct sys_device kvm_sysdev = {
2589         .id = 0,
2590         .cls = &kvm_sysdev_class,
2591 };
2592
2593 struct page *bad_page;
2594 pfn_t bad_pfn;
2595
2596 static inline
2597 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
2598 {
2599         return container_of(pn, struct kvm_vcpu, preempt_notifier);
2600 }
2601
2602 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
2603 {
2604         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2605
2606         kvm_arch_vcpu_load(vcpu, cpu);
2607 }
2608
2609 static void kvm_sched_out(struct preempt_notifier *pn,
2610                           struct task_struct *next)
2611 {
2612         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2613
2614         kvm_arch_vcpu_put(vcpu);
2615 }
2616
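/*
 * Main initialisation, called by the arch module (kvm-intel or kvm-amd on
 * x86) with its vcpu size and module owner.  Order matters: arch init,
 * hardware setup and per-cpu compatibility checks, hardware enable on all
 * online CPUs, then the hotplug/reboot notifiers, sysdev hooks and vcpu
 * cache, and finally the /dev/kvm misc device that exposes everything to
 * userspace.
 */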
2617 int kvm_init(void *opaque, unsigned int vcpu_size,
2618                   struct module *module)
2619 {
2620         int r;
2621         int cpu;
2622
2623         kvm_init_debug();
2624
2625         r = kvm_arch_init(opaque);
2626         if (r)
2627                 goto out_fail;
2628
2629         bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2630
2631         if (bad_page == NULL) {
2632                 r = -ENOMEM;
2633                 goto out;
2634         }
2635
2636         bad_pfn = page_to_pfn(bad_page);
2637
2638         if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
2639                 r = -ENOMEM;
2640                 goto out_free_0;
2641         }
2642
2643         r = kvm_arch_hardware_setup();
2644         if (r < 0)
2645                 goto out_free_0a;
2646
2647         for_each_online_cpu(cpu) {
2648                 smp_call_function_single(cpu,
2649                                 kvm_arch_check_processor_compat,
2650                                 &r, 1);
2651                 if (r < 0)
2652                         goto out_free_1;
2653         }
2654
2655         on_each_cpu(hardware_enable, NULL, 1);
2656         r = register_cpu_notifier(&kvm_cpu_notifier);
2657         if (r)
2658                 goto out_free_2;
2659         register_reboot_notifier(&kvm_reboot_notifier);
2660
2661         r = sysdev_class_register(&kvm_sysdev_class);
2662         if (r)
2663                 goto out_free_3;
2664
2665         r = sysdev_register(&kvm_sysdev);
2666         if (r)
2667                 goto out_free_4;
2668
2669         /* A kmem cache lets us meet the alignment requirements of fx_save. */
2670         kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
2671                                            __alignof__(struct kvm_vcpu),
2672                                            0, NULL);
2673         if (!kvm_vcpu_cache) {
2674                 r = -ENOMEM;
2675                 goto out_free_5;
2676         }
2677
2678         kvm_chardev_ops.owner = module;
2679         kvm_vm_fops.owner = module;
2680         kvm_vcpu_fops.owner = module;
2681
2682         r = misc_register(&kvm_dev);
2683         if (r) {
2684                 printk(KERN_ERR "kvm: misc device register failed\n");
2685                 goto out_free;
2686         }
2687
2688         kvm_preempt_ops.sched_in = kvm_sched_in;
2689         kvm_preempt_ops.sched_out = kvm_sched_out;
2690
2691         return 0;
2692
2693 out_free:
2694         kmem_cache_destroy(kvm_vcpu_cache);
2695 out_free_5:
2696         sysdev_unregister(&kvm_sysdev);
2697 out_free_4:
2698         sysdev_class_unregister(&kvm_sysdev_class);
2699 out_free_3:
2700         unregister_reboot_notifier(&kvm_reboot_notifier);
2701         unregister_cpu_notifier(&kvm_cpu_notifier);
2702 out_free_2:
2703         on_each_cpu(hardware_disable, NULL, 1);
2704 out_free_1:
2705         kvm_arch_hardware_unsetup();
2706 out_free_0a:
2707         free_cpumask_var(cpus_hardware_enabled);
2708 out_free_0:
2709         __free_page(bad_page);
2710 out:
2711         kvm_arch_exit();
2712         kvm_exit_debug();
2713 out_fail:
2714         return r;
2715 }
2716 EXPORT_SYMBOL_GPL(kvm_init);
2717
2718 void kvm_exit(void)
2719 {
2720         kvm_trace_cleanup();
2721         misc_deregister(&kvm_dev);
2722         kmem_cache_destroy(kvm_vcpu_cache);
2723         sysdev_unregister(&kvm_sysdev);
2724         sysdev_class_unregister(&kvm_sysdev_class);
2725         unregister_reboot_notifier(&kvm_reboot_notifier);
2726         unregister_cpu_notifier(&kvm_cpu_notifier);
2727         on_each_cpu(hardware_disable, NULL, 1);
2728         kvm_arch_hardware_unsetup();
2729         kvm_arch_exit();
2730         kvm_exit_debug();
2731         free_cpumask_var(cpus_hardware_enabled);
2732         __free_page(bad_page);
2733 }
2734 EXPORT_SYMBOL_GPL(kvm_exit);