arm/arm64: KVM: Fix BE accesses to GICv2 EISR and ELRSR regs
[firefly-linux-kernel-4.4.55.git] virt/kvm/kvm_main.c
index 5a0817ee996ec058e1164dd77fb7404c1d6740a1..278232025129cae0686b5fa628c07f27169e7d2d 100644 (file)
@@ -95,8 +95,6 @@ static int hardware_enable_all(void);
 static void hardware_disable_all(void);
 
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-static void update_memslots(struct kvm_memslots *slots,
-                           struct kvm_memory_slot *new, u64 last_generation);
 
 static void kvm_release_pfn_dirty(pfn_t pfn);
 static void mark_page_dirty_in_slot(struct kvm *kvm,
@@ -477,6 +475,13 @@ static struct kvm *kvm_create_vm(unsigned long type)
        kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
        if (!kvm->memslots)
                goto out_err_no_srcu;
+
+       /*
+        * Init kvm generation close to the maximum to easily test the
+        * code of handling generation number wrap-around.
+        */
+       kvm->memslots->generation = -150;
+
        kvm_init_memslots_id(kvm);
        if (init_srcu_struct(&kvm->srcu))
                goto out_err_no_srcu;
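
Editor's note: assigning -150 to the generation counter (a u64 in this tree, as the u64 last_generation parameter removed below shows) stores a value 150 below the unsigned maximum, so only a handful of memslot updates are needed before the counter wraps, which is exactly what the comment says this is meant to exercise. A minimal user-space sketch of the same idiom; this is illustrative only, not kernel code:

    /* sketch: why "= -150" puts an unsigned counter 150 steps from wrap-around */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t generation = -150;     /* same idiom as kvm->memslots->generation = -150 */

            printf("start value:     %" PRIu64 "\n", generation);                  /* UINT64_MAX - 149 */
            printf("distance to 0:   %" PRIu64 "\n", UINT64_MAX - generation + 1); /* 150              */

            generation += 150;              /* 150 increments later the counter wraps */
            printf("after 150 bumps: %" PRIu64 "\n", generation);                  /* 0                */
            return 0;
    }
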
@@ -688,8 +693,7 @@ static void sort_memslots(struct kvm_memslots *slots)
 }
 
 static void update_memslots(struct kvm_memslots *slots,
-                           struct kvm_memory_slot *new,
-                           u64 last_generation)
+                           struct kvm_memory_slot *new)
 {
        if (new) {
                int id = new->id;
@@ -700,15 +704,13 @@ static void update_memslots(struct kvm_memslots *slots,
                if (new->npages != npages)
                        sort_memslots(slots);
        }
-
-       slots->generation = last_generation + 1;
 }
 
 static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
 {
        u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
 
-#ifdef KVM_CAP_READONLY_MEM
+#ifdef __KVM_HAVE_READONLY_MEM
        valid_flags |= KVM_MEM_READONLY;
 #endif
 
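
Editor's note: the guard changes from KVM_CAP_READONLY_MEM to __KVM_HAVE_READONLY_MEM. The likely motivation (not visible in this hunk) is that the capability number lives in the shared uapi header and is defined regardless of architecture, so the old #ifdef did not actually gate anything, whereas __KVM_HAVE_READONLY_MEM is only defined by architectures that support read-only slots. The function itself is a plain flag whitelist check; a self-contained user-space sketch of the same pattern, with all names below made up:

    #include <stdint.h>
    #include <stdio.h>

    #define MEM_LOG_DIRTY_PAGES     (1u << 0)       /* hypothetical flag bits */
    #define MEM_READONLY            (1u << 1)

    /* Reject any flag bit that is not on the whitelist. */
    static int check_flags(uint32_t flags)
    {
            uint32_t valid_flags = MEM_LOG_DIRTY_PAGES;

    #ifdef HAVE_READONLY_MEM        /* stands in for __KVM_HAVE_READONLY_MEM */
            valid_flags |= MEM_READONLY;
    #endif
            return (flags & ~valid_flags) ? -1 : 0;
    }

    int main(void)
    {
            printf("%d\n", check_flags(MEM_LOG_DIRTY_PAGES));       /* 0: allowed      */
            printf("%d\n", check_flags(1u << 5));                   /* -1: unknown bit */
            return 0;
    }
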
@@ -723,10 +725,24 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 {
        struct kvm_memslots *old_memslots = kvm->memslots;
 
-       update_memslots(slots, new, kvm->memslots->generation);
+       /*
+        * Set the low bit in the generation, which disables SPTE caching
+        * until the end of synchronize_srcu_expedited.
+        */
+       WARN_ON(old_memslots->generation & 1);
+       slots->generation = old_memslots->generation + 1;
+
+       update_memslots(slots, new);
        rcu_assign_pointer(kvm->memslots, slots);
        synchronize_srcu_expedited(&kvm->srcu);
 
+       /*
+        * Increment the new memslot generation a second time. This prevents
+        * vm exits that race with memslot updates from caching a memslot
+        * generation that will (potentially) be valid forever.
+        */
+       slots->generation++;
+
        kvm_arch_memslots_updated(kvm);
 
        return old_memslots;
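
Editor's note: with the generation bump removed from update_memslots() above, install_new_memslots() now drives a small protocol on the generation's low bit. The WARN_ON asserts the published generation is even, the first increment makes it odd before the new memslots are published and readers are drained by synchronize_srcu_expedited(), and the second increment makes it even again afterwards. A reader that snapshots an odd generation knows an update is in flight and must not cache what it sees, and anything cached against the old even value is invalidated by the second bump. Below is a self-contained user-space model of that validity check; the real in-kernel consumers (such as the SPTE/MMIO caching the comment refers to) live elsewhere in KVM:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cache_entry {
            uint64_t generation;    /* generation observed when the entry was filled */
            /* ...the cached translation itself would live here... */
    };

    /*
     * An entry is usable only if the current generation is even (no update
     * in flight) and matches the generation the entry was filled against.
     */
    static bool cache_entry_valid(const struct cache_entry *e, uint64_t cur_gen)
    {
            if (cur_gen & 1)
                    return false;           /* update in progress: caching disabled */
            return e->generation == cur_gen;
    }

    int main(void)
    {
            uint64_t gen = 4;                               /* published, even generation */
            struct cache_entry e = { .generation = 4 };

            printf("%d\n", cache_entry_valid(&e, gen));     /* 1: valid       */
            gen++;                                          /* update begins  */
            printf("%d\n", cache_entry_valid(&e, gen));     /* 0: in flight   */
            gen++;                                          /* update done    */
            printf("%d\n", cache_entry_valid(&e, gen));     /* 0: entry stale */
            return 0;
    }
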
@@ -777,7 +793,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;
 
-       r = -EINVAL;
        if (npages > KVM_MEM_MAX_NR_PAGES)
                goto out;
 
@@ -791,7 +806,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
        new.npages = npages;
        new.flags = mem->flags;
 
-       r = -EINVAL;
        if (npages) {
                if (!old.npages)
                        change = KVM_MR_CREATE;
@@ -847,7 +861,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
        }
 
        if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
-               r = -ENOMEM;
                slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
                                GFP_KERNEL);
                if (!slots)
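
Editor's note: the dropped r = -EINVAL / r = -ENOMEM assignments are presumably redundant because r already holds an appropriate error code at those points in the full function, which these hunks do not show. The underlying idiom is to set the error code once per failure class and only reassign when the class changes; a minimal user-space sketch of that shape, with names and sizes made up:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int setup_region(size_t npages, size_t max_pages)
    {
            int r = -EINVAL;                /* default error for every validation failure */
            void *copy = NULL;

            if (npages == 0 || npages > max_pages)
                    goto out;               /* r is already -EINVAL, no reassignment needed */

            r = -ENOMEM;                    /* reassign only when the failure class changes */
            copy = malloc(npages * 64);
            if (!copy)
                    goto out;

            memset(copy, 0, npages * 64);
            r = 0;                          /* success */
    out:
            free(copy);
            return r;
    }

    int main(void)
    {
            printf("%d %d\n", setup_region(0, 128), setup_region(16, 128));  /* -22 0 on Linux */
            return 0;
    }
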
@@ -1076,9 +1089,9 @@ EXPORT_SYMBOL_GPL(gfn_to_hva);
  * If writable is set to false, the hva returned by this function is only
  * allowed to be read.
  */
-unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
+unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
+                                     gfn_t gfn, bool *writable)
 {
-       struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
        unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
 
        if (!kvm_is_error_hva(hva) && writable)
@@ -1087,6 +1100,13 @@ unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
        return hva;
 }
 
+unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
+{
+       struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+
+       return gfn_to_hva_memslot_prot(slot, gfn, writable);
+}
+
 static int kvm_read_hva(void *data, void __user *hva, int len)
 {
        return __copy_from_user(data, hva, len);
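
Editor's note: the refactor above hoists the gfn_to_memslot() lookup out of the worker. gfn_to_hva_memslot_prot() now operates on a memslot the caller already holds, and gfn_to_hva_prot() becomes a thin wrapper that does the lookup and delegates, so callers that already have the slot in hand can skip the extra lookup. A user-space sketch of the same wrapper-over-worker shape; everything below is invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    struct slot { unsigned long base_hva; bool writable; };

    static struct slot slots[2] = {
            { 0x7f0000000000UL, true  },
            { 0x7f0000100000UL, false },
    };

    /* Worker: takes an already-resolved slot (like gfn_to_hva_memslot_prot). */
    static unsigned long slot_to_hva_prot(struct slot *s, unsigned long gfn, bool *writable)
    {
            if (writable)
                    *writable = s->writable;
            return s->base_hva + (gfn << 12);
    }

    /* Wrapper: looks the slot up, then delegates (like gfn_to_hva_prot). */
    static unsigned long lookup_hva_prot(unsigned long gfn, bool *writable)
    {
            struct slot *s = &slots[gfn & 1];       /* stand-in for gfn_to_memslot() */

            return slot_to_hva_prot(s, gfn, writable);
    }

    int main(void)
    {
            bool w;
            unsigned long hva = lookup_hva_prot(3, &w);

            printf("hva=%#lx writable=%d\n", hva, w);
            return 0;
    }
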
@@ -1769,8 +1789,7 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
        bool eligible;
 
        eligible = !vcpu->spin_loop.in_spin_loop ||
-                       (vcpu->spin_loop.in_spin_loop &&
-                        vcpu->spin_loop.dy_eligible);
+                   vcpu->spin_loop.dy_eligible;
 
        if (vcpu->spin_loop.in_spin_loop)
                kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
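
Editor's note: the simplification relies on the identity !a || (a && b) == !a || b, so the second in_spin_loop test inside the parentheses was redundant. A quick exhaustive check of that identity:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            for (int a = 0; a <= 1; a++) {
                    for (int b = 0; b <= 1; b++) {
                            bool before = !a || (a && b);   /* original expression   */
                            bool after  = !a || b;          /* simplified expression */
                            assert(before == after);
                    }
            }
            printf("!a || (a && b) == !a || b for all inputs\n");
            return 0;
    }
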
@@ -2260,6 +2279,29 @@ struct kvm_device *kvm_device_from_filp(struct file *filp)
        return filp->private_data;
 }
 
+static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
+#ifdef CONFIG_KVM_MPIC
+       [KVM_DEV_TYPE_FSL_MPIC_20]      = &kvm_mpic_ops,
+       [KVM_DEV_TYPE_FSL_MPIC_42]      = &kvm_mpic_ops,
+#endif
+
+#ifdef CONFIG_KVM_XICS
+       [KVM_DEV_TYPE_XICS]             = &kvm_xics_ops,
+#endif
+};
+
+int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
+{
+       if (type >= ARRAY_SIZE(kvm_device_ops_table))
+               return -ENOSPC;
+
+       if (kvm_device_ops_table[type] != NULL)
+               return -EEXIST;
+
+       kvm_device_ops_table[type] = ops;
+       return 0;
+}
+
 static int kvm_ioctl_create_device(struct kvm *kvm,
                                   struct kvm_create_device *cd)
 {
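
Editor's note: the new kvm_device_ops_table moves device-type dispatch from a switch statement into an array indexed by device type. Back-ends known at build time sit in the initializer (MPIC and XICS here), while the others that the old switch handled (VFIO, ARM VGIC v2, FLIC) are presumably expected to register themselves through kvm_register_device_ops() at init time; the function returns -ENOSPC for an out-of-range type and -EEXIST if the slot is already taken. A small self-contained model of the registration side, with types and names made up rather than the real KVM API:

    #include <errno.h>
    #include <stdio.h>

    struct dev_ops { const char *name; };

    #define DEV_TYPE_MAX 8

    static struct dev_ops builtin_ops = { "builtin" };
    static struct dev_ops runtime_ops = { "runtime" };

    /* Build-time entries go straight into the initializer... */
    static struct dev_ops *ops_table[DEV_TYPE_MAX] = {
            [2] = &builtin_ops,
    };

    /* ...everything else registers itself once at init time. */
    static int register_device_ops(struct dev_ops *ops, unsigned int type)
    {
            if (type >= DEV_TYPE_MAX)
                    return -ENOSPC;
            if (ops_table[type] != NULL)
                    return -EEXIST;
            ops_table[type] = ops;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", register_device_ops(&runtime_ops, 5));   /* 0: registered        */
            printf("%d\n", register_device_ops(&runtime_ops, 2));   /* -EEXIST (-17 on Linux) */
            printf("%d\n", register_device_ops(&runtime_ops, 99));  /* -ENOSPC (-28 on Linux) */
            return 0;
    }
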
@@ -2268,36 +2310,12 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
        bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
        int ret;
 
-       switch (cd->type) {
-#ifdef CONFIG_KVM_MPIC
-       case KVM_DEV_TYPE_FSL_MPIC_20:
-       case KVM_DEV_TYPE_FSL_MPIC_42:
-               ops = &kvm_mpic_ops;
-               break;
-#endif
-#ifdef CONFIG_KVM_XICS
-       case KVM_DEV_TYPE_XICS:
-               ops = &kvm_xics_ops;
-               break;
-#endif
-#ifdef CONFIG_KVM_VFIO
-       case KVM_DEV_TYPE_VFIO:
-               ops = &kvm_vfio_ops;
-               break;
-#endif
-#ifdef CONFIG_KVM_ARM_VGIC
-       case KVM_DEV_TYPE_ARM_VGIC_V2:
-               ops = &kvm_arm_vgic_v2_ops;
-               break;
-#endif
-#ifdef CONFIG_S390
-       case KVM_DEV_TYPE_FLIC:
-               ops = &kvm_flic_ops;
-               break;
-#endif
-       default:
+       if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
+               return -ENODEV;
+
+       ops = kvm_device_ops_table[cd->type];
+       if (ops == NULL)
                return -ENODEV;
-       }
 
        if (test)
                return 0;
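
Editor's note: on the lookup side, kvm_ioctl_create_device() now just bounds-checks the requested type and reads the table, returning -ENODEV both for out-of-range types and for types whose back-end was never built in or registered, which matches what the removed default: case produced. An illustrative, self-contained sketch of that lookup half, again with invented names:

    #include <errno.h>
    #include <stdio.h>

    struct dev_ops { const char *name; };

    static struct dev_ops mpic_like = { "mpic-like" };

    static struct dev_ops *ops_table[4] = {
            [1] = &mpic_like,               /* only type 1 has a back-end built in */
    };

    /* Bounds-check the type, then a NULL entry means "no such device". */
    static int lookup_device_ops(unsigned int type, struct dev_ops **out)
    {
            if (type >= sizeof(ops_table) / sizeof(ops_table[0]))
                    return -ENODEV;
            if (ops_table[type] == NULL)
                    return -ENODEV;
            *out = ops_table[type];
            return 0;
    }

    int main(void)
    {
            struct dev_ops *ops;

            printf("type 1: %d\n", lookup_device_ops(1, &ops));     /* 0       */
            printf("type 2: %d\n", lookup_device_ops(2, &ops));     /* -ENODEV */
            printf("type 9: %d\n", lookup_device_ops(9, &ops));     /* -ENODEV */
            return 0;
    }
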
@@ -2612,7 +2630,6 @@ static long kvm_dev_ioctl(struct file *filp,
 
        switch (ioctl) {
        case KVM_GET_API_VERSION:
-               r = -EINVAL;
                if (arg)
                        goto out;
                r = KVM_API_VERSION;
@@ -2624,7 +2641,6 @@ static long kvm_dev_ioctl(struct file *filp,
                r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
                break;
        case KVM_GET_VCPU_MMAP_SIZE:
-               r = -EINVAL;
                if (arg)
                        goto out;
                r = PAGE_SIZE;     /* struct kvm_run */
@@ -2669,7 +2685,7 @@ static void hardware_enable_nolock(void *junk)
 
        cpumask_set_cpu(cpu, cpus_hardware_enabled);
 
-       r = kvm_arch_hardware_enable(NULL);
+       r = kvm_arch_hardware_enable();
 
        if (r) {
                cpumask_clear_cpu(cpu, cpus_hardware_enabled);
@@ -2694,7 +2710,7 @@ static void hardware_disable_nolock(void *junk)
        if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
                return;
        cpumask_clear_cpu(cpu, cpus_hardware_enabled);
-       kvm_arch_hardware_disable(NULL);
+       kvm_arch_hardware_disable();
 }
 
 static void hardware_disable(void)