/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 */
#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}
static int coalesced_mmio_in_range(struct kvm_io_device *this,
				   gpa_t addr, int len, int is_write)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_zone *zone;
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;
	int i;

	if (!is_write)
		return 0;

	/* kvm->lock is taken by the caller and must not be released
	 * before dev.read/write
	 */

	/* Are we able to batch it?
	 * last is the first free entry; check that we don't meet the
	 * first used entry: there is always one unused entry in the buffer.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail < KVM_MAX_VCPUS) {
		/* full */
		return 0;
	}

	/* is it in a batchable area? */
	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/* (addr, len) must be fully included in
		 * (zone->addr, zone->size)
		 */
		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}
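/*
 * A worked example of the space check above (illustrative only,
 * assuming first = 5 and last = 2): entries 5 .. KVM_COALESCED_MMIO_MAX - 1
 * plus 0 and 1 are in use, so
 *
 *	avail = (5 - 2 - 1) % KVM_COALESCED_MMIO_MAX = 2
 *
 * i.e. two free slots besides the one slot that is always kept unused
 * so that first == last unambiguously means "empty".  Comparing against
 * KVM_MAX_VCPUS keeps extra headroom instead of filling the ring to the
 * very last entry.
 */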
static void coalesced_mmio_write(struct kvm_io_device *this,
				 gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	/* kvm->lock must be taken by the caller before the call to
	 * in_range()
	 */

	/* copy data into the first free entry of the ring */
	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);

	/* make the entry visible before publishing it via last */
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
}
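/*
 * The consumer side lives in userspace: the VMM reads entries from
 * ring->first up to (but excluding) ring->last, then advances first.
 * A minimal drain loop, as a sketch only (handle_mmio() is a
 * hypothetical VMM callback, not part of KVM):
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent =
 *			&ring->coalesced_mmio[ring->first];
 *		handle_mmio(ent->phys_addr, ent->data, ent->len);
 *		smp_wmb();
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */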
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	kfree(dev);
}
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.in_range   = coalesced_mmio_in_range,
	.destructor = coalesced_mmio_destructor,
};
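/*
 * For reference, the MMIO bus dispatches roughly like this (simplified
 * sketch of the caller side in kvm_main.c, not a verbatim quote):
 *
 *	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, 1);
 *	if (dev)
 *		kvm_iodevice_write(dev, addr, len, val);
 *
 * so coalesced_mmio_in_range() acts as the probe, and
 * coalesced_mmio_write() only runs once the probe has accepted the
 * access.
 */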
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;
	kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev);

	return 0;
}
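/*
 * Userspace reaches the ring through the vcpu mmap area.  A sketch of
 * the setup (error handling omitted; kvm_fd/vcpu_fd are assumed to
 * come from the usual /dev/kvm open and KVM_CREATE_VM/KVM_CREATE_VCPU
 * sequence):
 *
 *	int pg = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
 *	long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	struct kvm_coalesced_mmio_ring *ring =
 *		(void *)run + pg * PAGE_SIZE;	/* pg > 0 if supported */
 */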
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		mutex_unlock(&kvm->lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	mutex_unlock(&kvm->lock);
	return 0;
}
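/*
 * Typical use from a VMM, as a hedged example (the VGA window at
 * 0xa0000 is just an illustration of a write-heavy region):
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xa0000,
 *		.size = 0x20000,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 *
 * Guest writes inside the zone are then logged to the ring instead of
 * causing an exit to userspace for every access.
 */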
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	int i;
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;

	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/* unregister all zones included in
		 * (zone->addr, zone->size)
		 */
		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
			dev->nb_zones--;
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	mutex_unlock(&kvm->lock);
	return 0;
}
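/*
 * Because the loop above drops every zone fully contained in the
 * requested range, a single ioctl can undo several registrations, e.g.
 * (mirroring the registration example above):
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xa0000,
 *		.size = 0x20000,
 *	};
 *	ioctl(vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone);
 *
 * Surviving zones are compacted by moving the last zone into the freed
 * slot, so the order of the array is not preserved.
 */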