KVM: document lock nesting rule
[firefly-linux-kernel-4.4.55.git] / virt / kvm / kvm_main.c
index 1da8072d61b1f8655df317fc02721737a09e21cc..fc1b58a72757b43d75f04534ffc37d222033c91a 100644 (file)
@@ -68,7 +68,7 @@ MODULE_LICENSE("GPL");
 /*
  * Ordering of locks:
  *
- *             kvm->lock --> kvm->irq_lock
+ *             kvm->slots_lock --> kvm->lock --> kvm->irq_lock
  */
 
 DEFINE_SPINLOCK(kvm_lock);
@@ -1001,19 +1001,25 @@ out:
 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
 {
+       int i;
+
        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);
 
        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);
 
-       if (!dont || free->lpage_info != dont->lpage_info)
-               vfree(free->lpage_info);
+
+       for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+               if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
+                       vfree(free->lpage_info[i]);
+                       free->lpage_info[i] = NULL;
+               }
+       }
 
        free->npages = 0;
        free->dirty_bitmap = NULL;
        free->rmap = NULL;
-       free->lpage_info = NULL;
 }
 
 void kvm_free_physmem(struct kvm *kvm)
@@ -1087,7 +1093,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
        int r;
        gfn_t base_gfn;
        unsigned long npages, ugfn;
-       unsigned long largepages, i;
+       int lpages;
+       unsigned long i, j;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;
 
@@ -1161,33 +1168,48 @@ int __kvm_set_memory_region(struct kvm *kvm,
                else
                        new.userspace_addr = 0;
        }
-       if (npages && !new.lpage_info) {
-               largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
-               largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
+       if (!npages)
+               goto skip_lpage;
+
+       for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+               int level = i + 2;
 
-               new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
 
-               if (!new.lpage_info)
+               if (new.lpage_info[i])
+                       continue;
+
+               lpages = 1 + (base_gfn + npages - 1) /
+                            KVM_PAGES_PER_HPAGE(level);
+               lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);
+
+               new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
+
+               if (!new.lpage_info[i])
                        goto out_free;
 
-               memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
+               memset(new.lpage_info[i], 0,
+                      lpages * sizeof(*new.lpage_info[i]));
 
-               if (base_gfn % KVM_PAGES_PER_HPAGE)
-                       new.lpage_info[0].write_count = 1;
-               if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
-                       new.lpage_info[largepages-1].write_count = 1;
+               if (base_gfn % KVM_PAGES_PER_HPAGE(level))
+                       new.lpage_info[i][0].write_count = 1;
+               if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
+                       new.lpage_info[i][lpages - 1].write_count = 1;
                ugfn = new.userspace_addr >> PAGE_SHIFT;
                /*
                 * If the gfn and userspace address are not aligned wrt each
                 * other, or if explicitly asked to, disable large page
                 * support for this slot
                 */
-               if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1) ||
+               if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
                    !largepages_enabled)
-                       for (i = 0; i < largepages; ++i)
-                               new.lpage_info[i].write_count = 1;
+                       for (j = 0; j < lpages; ++j)
+                               new.lpage_info[i][j].write_count = 1;
        }
 
+skip_lpage:
+
        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
@@ -2252,10 +2274,12 @@ static long kvm_vm_ioctl(struct file *filp,
 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
        case KVM_SET_BOOT_CPU_ID:
                r = 0;
+               mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) != 0)
                        r = -EBUSY;
                else
                        kvm->bsp_vcpu_id = arg;
+               mutex_unlock(&kvm->lock);
                break;
 #endif
        default:
@@ -2374,7 +2398,7 @@ static long kvm_dev_ioctl(struct file *filp,
        case KVM_TRACE_ENABLE:
        case KVM_TRACE_PAUSE:
        case KVM_TRACE_DISABLE:
-               r = kvm_trace_ioctl(ioctl, arg);
+               r = -EOPNOTSUPP;
                break;
        default:
                return kvm_arch_dev_ioctl(filp, ioctl, arg);
@@ -2488,22 +2512,38 @@ void kvm_io_bus_destroy(struct kvm_io_bus *bus)
        }
 }
 
-struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
-                                         gpa_t addr, int len, int is_write)
+/* kvm_io_bus_write - called under kvm->slots_lock */
+int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
+                    int len, const void *val)
 {
        int i;
+       for (i = 0; i < bus->dev_count; i++)
+               if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
+                       return 0;
+       return -EOPNOTSUPP;
+}
 
-       for (i = 0; i < bus->dev_count; i++) {
-               struct kvm_io_device *pos = bus->devs[i];
-
-               if (kvm_iodevice_in_range(pos, addr, len, is_write))
-                       return pos;
-       }
+/* kvm_io_bus_read - called under kvm->slots_lock */
+int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
+{
+       int i;
+       for (i = 0; i < bus->dev_count; i++)
+               if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
+                       return 0;
+       return -EOPNOTSUPP;
+}
 
-       return NULL;
+void kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
+                            struct kvm_io_device *dev)
+{
+       down_write(&kvm->slots_lock);
+       __kvm_io_bus_register_dev(bus, dev);
+       up_write(&kvm->slots_lock);
 }
 
-void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
+/* An unlocked version. Caller must have write lock on slots_lock. */
+void __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
+                            struct kvm_io_device *dev)
 {
        BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
 
@@ -2724,7 +2764,6 @@ EXPORT_SYMBOL_GPL(kvm_init);
 
 void kvm_exit(void)
 {
-       kvm_trace_cleanup();
        tracepoint_synchronize_unregister();
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);