/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>
#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;
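
/*
 * Overview: an irqfd couples an eventfd to a guest interrupt line (GSI).
 * When userspace or a producer signals the eventfd, our wait-queue
 * callback (irqfd_wakeup) either injects the interrupt directly or, when
 * injection might sleep, defers to the irqfd_inject work item below.
 */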
static void
irqfd_inject(struct work_struct *work)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(work, struct kvm_kernel_irqfd, inject);
        struct kvm *kvm = irqfd->kvm;

        if (!irqfd->resampler) {
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
                            false);
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
                            false);
        } else
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            irqfd->gsi, 1, false);
}
/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
        struct kvm_kernel_irqfd_resampler *resampler;
        struct kvm *kvm;
        struct kvm_kernel_irqfd *irqfd;
        int idx;

        resampler = container_of(kian,
                        struct kvm_kernel_irqfd_resampler, notifier);
        kvm = resampler->kvm;

        kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                    resampler->notifier.gsi, 0, false);

        idx = srcu_read_lock(&kvm->irq_srcu);

        list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
                eventfd_signal(irqfd->resamplefd, 1);

        srcu_read_unlock(&kvm->irq_srcu, idx);
}
static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
        struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
        struct kvm *kvm = resampler->kvm;

        mutex_lock(&kvm->irqfds.resampler_lock);

        list_del_rcu(&irqfd->resampler_link);
        synchronize_srcu(&kvm->irq_srcu);

        if (list_empty(&resampler->list)) {
                list_del(&resampler->link);
                kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            resampler->notifier.gsi, 0, false);
                kfree(resampler);
        }

        mutex_unlock(&kvm->irqfds.resampler_lock);
}
/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(work, struct kvm_kernel_irqfd, shutdown);
        u64 cnt;

        /*
         * Synchronize with the wait-queue and unhook ourselves to prevent
         * further events.
         */
        eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

        /*
         * We know no new events will be scheduled at this point, so block
         * until all previously outstanding events have completed.
         */
        flush_work(&irqfd->inject);

        if (irqfd->resampler) {
                irqfd_resampler_shutdown(irqfd);
                eventfd_ctx_put(irqfd->resamplefd);
        }

        /*
         * It is now safe to release the object's resources.
         */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
        irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
        eventfd_ctx_put(irqfd->eventfd);
        kfree(irqfd);
}
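
/*
 * Note on the ordering above: the irqfd is unhooked from the eventfd's
 * wait-queue first, the inject work is flushed second, and only then are
 * the backing objects freed.  Any other order could let a late wakeup
 * touch freed memory.
 */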
/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
        return list_empty(&irqfd->list) ? false : true;
}
/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
        BUG_ON(!irqfd_is_active(irqfd));

        list_del_init(&irqfd->list);

        queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}
int __attribute__((weak)) kvm_arch_set_irq(
                        struct kvm_kernel_irq_routing_entry *irq,
                        struct kvm *kvm, int irq_source_id,
                        int level,
                        bool line_status)
{
        return -EWOULDBLOCK;
}
/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(wait, struct kvm_kernel_irqfd, wait);
        unsigned long flags = (unsigned long)key;
        struct kvm_kernel_irq_routing_entry irq;
        struct kvm *kvm = irqfd->kvm;
        unsigned seq;
        int idx;

        if (flags & POLLIN) {
                idx = srcu_read_lock(&kvm->irq_srcu);
                do {
                        seq = read_seqcount_begin(&irqfd->irq_entry_sc);
                        irq = irqfd->irq_entry;
                } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
                /* An event has been signaled, inject an interrupt */
                if (irq.type == KVM_IRQ_ROUTING_MSI)
                        kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
                                    false);
                else if (kvm_arch_set_irq(&irq, kvm,
                                          KVM_USERSPACE_IRQ_SOURCE_ID, 1,
                                          false) == -EWOULDBLOCK)
                        schedule_work(&irqfd->inject);
                srcu_read_unlock(&kvm->irq_srcu, idx);
        }

        if (flags & POLLHUP) {
                /* The eventfd is closing, detach from KVM */
                unsigned long flags;

                spin_lock_irqsave(&kvm->irqfds.lock, flags);

                /*
                 * We must check if someone deactivated the irqfd before
                 * we could acquire the irqfds.lock since the item is
                 * deactivated from the KVM side before it is unhooked from
                 * the wait-queue.  If it is already deactivated, we can
                 * simply return knowing the other side will cleanup for us.
                 * We cannot race against the irqfd going away since the
                 * other side is required to acquire wqh->lock, which we hold.
                 */
                if (irqfd_is_active(irqfd))
                        irqfd_deactivate(irqfd);

                spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
        }

        return 0;
}
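
/*
 * Note: irqfd_wakeup() runs in atomic context and cannot take
 * kvm->irqfds.lock to read the routing entry.  It instead takes a
 * consistent snapshot of irqfd->irq_entry via the irq_entry_sc seqcount,
 * retrying if a concurrent irqfd_update() raced with the read.
 */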
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
                        poll_table *pt)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(pt, struct kvm_kernel_irqfd, pt);
        add_wait_queue(wqh, &irqfd->wait);
}
/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
        struct kvm_kernel_irq_routing_entry *e;
        struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
        int n_entries;

        n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

        write_seqcount_begin(&irqfd->irq_entry_sc);

        e = entries;
        if (n_entries == 1)
                irqfd->irq_entry = *e;
        else
                irqfd->irq_entry.type = 0;

        write_seqcount_end(&irqfd->irq_entry_sc);
}
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
                                struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
                                struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
                                struct kvm *kvm, unsigned int host_irq,
                                uint32_t guest_irq, bool set)
{
        return 0;
}
#endif
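
/*
 * The weak stubs above are overridden by architectures that support IRQ
 * bypass; on x86, for example, they update VT-d posted-interrupt
 * configuration when a VFIO producer attaches to the irqfd's consumer.
 */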
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
        struct kvm_kernel_irqfd *irqfd, *tmp;
        struct fd f;
        struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
        int ret;
        unsigned int events;
        int idx;

        if (!kvm_arch_intc_initialized(kvm))
                return -EAGAIN;

        irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
        if (!irqfd)
                return -ENOMEM;

        irqfd->kvm = kvm;
        irqfd->gsi = args->gsi;
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->inject, irqfd_inject);
        INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
        seqcount_init(&irqfd->irq_entry_sc);

        f = fdget(args->fd);
        if (!f.file) {
                ret = -EBADF;
                goto out;
        }

        eventfd = eventfd_ctx_fileget(f.file);
        if (IS_ERR(eventfd)) {
                ret = PTR_ERR(eventfd);
                goto fail;
        }

        irqfd->eventfd = eventfd;

        if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
                struct kvm_kernel_irqfd_resampler *resampler;

                resamplefd = eventfd_ctx_fdget(args->resamplefd);
                if (IS_ERR(resamplefd)) {
                        ret = PTR_ERR(resamplefd);
                        goto fail;
                }

                irqfd->resamplefd = resamplefd;
                INIT_LIST_HEAD(&irqfd->resampler_link);

                mutex_lock(&kvm->irqfds.resampler_lock);

                list_for_each_entry(resampler,
                                    &kvm->irqfds.resampler_list, link) {
                        if (resampler->notifier.gsi == irqfd->gsi) {
                                irqfd->resampler = resampler;
                                break;
                        }
                }

                if (!irqfd->resampler) {
                        resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
                        if (!resampler) {
                                ret = -ENOMEM;
                                mutex_unlock(&kvm->irqfds.resampler_lock);
                                goto fail;
                        }

                        resampler->kvm = kvm;
                        INIT_LIST_HEAD(&resampler->list);
                        resampler->notifier.gsi = irqfd->gsi;
                        resampler->notifier.irq_acked = irqfd_resampler_ack;
                        INIT_LIST_HEAD(&resampler->link);

                        list_add(&resampler->link, &kvm->irqfds.resampler_list);
                        kvm_register_irq_ack_notifier(kvm,
                                                      &resampler->notifier);
                        irqfd->resampler = resampler;
                }

                list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
                synchronize_srcu(&kvm->irq_srcu);

                mutex_unlock(&kvm->irqfds.resampler_lock);
        }
        /*
         * Install our own custom wake-up handling so we are notified via
         * a callback whenever someone signals the underlying eventfd
         */
        init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
        init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

        spin_lock_irq(&kvm->irqfds.lock);

        ret = 0;
        list_for_each_entry(tmp, &kvm->irqfds.items, list) {
                if (irqfd->eventfd != tmp->eventfd)
                        continue;
                /* This fd is used for another irq already. */
                ret = -EBUSY;
                spin_unlock_irq(&kvm->irqfds.lock);
                goto fail;
        }

        idx = srcu_read_lock(&kvm->irq_srcu);
        irqfd_update(kvm, irqfd);
        srcu_read_unlock(&kvm->irq_srcu, idx);

        list_add_tail(&irqfd->list, &kvm->irqfds.items);

        spin_unlock_irq(&kvm->irqfds.lock);

        /*
         * Check if there was an event already pending on the eventfd
         * before we registered, and trigger it as if we didn't miss it.
         */
        events = f.file->f_op->poll(f.file, &irqfd->pt);

        if (events & POLLIN)
                schedule_work(&irqfd->inject);

        /*
         * do not drop the file until the irqfd is fully initialized,
         * otherwise we might race against the POLLHUP.
         */
        fdput(f);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
        irqfd->consumer.token = (void *)irqfd->eventfd;
        irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
        irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
        irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
        irqfd->consumer.start = kvm_arch_irq_bypass_start;
        ret = irq_bypass_register_consumer(&irqfd->consumer);
        if (ret)
                pr_info("irq bypass consumer (token %p) registration fails: %d\n",
                        irqfd->consumer.token, ret);
#endif

        return 0;
fail:
        if (irqfd->resampler)
                irqfd_resampler_shutdown(irqfd);

        if (resamplefd && !IS_ERR(resamplefd))
                eventfd_ctx_put(resamplefd);

        if (eventfd && !IS_ERR(eventfd))
                eventfd_ctx_put(eventfd);

        fdput(f);

out:
        kfree(irqfd);
        return ret;
}
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        struct kvm_irq_ack_notifier *kian;
        int gsi, idx;

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
        if (gsi != -1)
                hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                         link)
                        if (kian->gsi == gsi) {
                                srcu_read_unlock(&kvm->irq_srcu, idx);
                                return true;
                        }

        srcu_read_unlock(&kvm->irq_srcu, idx);

        return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
        struct kvm_irq_ack_notifier *kian;

        hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                 link)
                if (kian->gsi == gsi)
                        kian->irq_acked(kian);
}
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        int gsi, idx;

        trace_kvm_ack_irq(irqchip, pin);

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
        if (gsi != -1)
                kvm_notify_acked_gsi(kvm, gsi);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
        mutex_unlock(&kvm->irq_lock);
        kvm_vcpu_request_scan_ioapic(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_del_init_rcu(&kian->link);
        mutex_unlock(&kvm->irq_lock);
        synchronize_srcu(&kvm->irq_srcu);
        kvm_vcpu_request_scan_ioapic(kvm);
}
#endif
void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
        spin_lock_init(&kvm->irqfds.lock);
        INIT_LIST_HEAD(&kvm->irqfds.items);
        INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
        mutex_init(&kvm->irqfds.resampler_lock);
#endif
        INIT_LIST_HEAD(&kvm->ioeventfds);
}
#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * shutdown any irqfd's that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
        struct kvm_kernel_irqfd *irqfd, *tmp;
        struct eventfd_ctx *eventfd;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
                if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
                        /*
                         * This clearing of irq_entry.type is needed for when
                         * another thread calls kvm_irq_routing_update before
                         * we flush the workqueue below (we synchronize with
                         * kvm_irq_routing_update using irqfds.lock).
                         */
                        write_seqcount_begin(&irqfd->irq_entry_sc);
                        irqfd->irq_entry.type = 0;
                        write_seqcount_end(&irqfd->irq_entry_sc);
                        irqfd_deactivate(irqfd);
                }
        }

        spin_unlock_irq(&kvm->irqfds.lock);
        eventfd_ctx_put(eventfd);

        /*
         * Block until we know all outstanding shutdown jobs have completed
         * so that we guarantee there will not be any more interrupts on this
         * gsi once this deassign function returns.
         */
        flush_workqueue(irqfd_cleanup_wq);

        return 0;
}
int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
                return -EINVAL;

        if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
                return kvm_irqfd_deassign(kvm, args);

        return kvm_irqfd_assign(kvm, args);
}
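
/*
 * Illustrative userspace sketch (not kernel code): a VMM arms an irqfd
 * roughly like this, where "vmfd" is an open VM fd and "gsi" is a
 * hypothetical guest interrupt number:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_irqfd req = { .fd = efd, .gsi = gsi };
 *	ioctl(vmfd, KVM_IRQFD, &req);
 *
 * A subsequent write(efd, &(uint64_t){1}, 8) injects the interrupt via
 * irqfd_wakeup() above, with no further ioctl round trips.
 */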
/*
 * This function is called as the kvm VM fd is being released. Shutdown all
 * irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
        struct kvm_kernel_irqfd *irqfd, *tmp;

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
                irqfd_deactivate(irqfd);

        spin_unlock_irq(&kvm->irqfds.lock);

        /*
         * Block until we know all outstanding shutdown jobs have completed
         * since we do not take a kvm* reference.
         */
        flush_workqueue(irqfd_cleanup_wq);
}
/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
        struct kvm_kernel_irqfd *irqfd;

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
                irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
                if (irqfd->producer) {
                        int ret = kvm_arch_update_irqfd_routing(
                                        irqfd->kvm, irqfd->producer->irq,
                                        irqfd->gsi, 1);
                        WARN_ON(ret);
                }
#endif
        }

        spin_unlock_irq(&kvm->irqfds.lock);
}
/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated
 * single-thread queue to prevent deadlock against flushing the normal
 * work-queue.
 */
int kvm_irqfd_init(void)
{
        irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
        if (!irqfd_cleanup_wq)
                return -ENOMEM;

        return 0;
}

void kvm_irqfd_exit(void)
{
        destroy_workqueue(irqfd_cleanup_wq);
}
#endif
/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */
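
/*
 * Illustrative userspace sketch (not kernel code): "vmfd" is an open VM
 * fd and "doorbell_gpa" a hypothetical guest-physical address of a
 * device doorbell register:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_ioeventfd req = {
 *		.addr = doorbell_gpa,
 *		.len  = 4,
 *		.fd   = efd,
 *	};
 *	ioctl(vmfd, KVM_IOEVENTFD, &req);
 *
 * Guest writes to that address then signal efd instead of exiting to
 * userspace, which is how backends such as vhost are kicked.
 */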
struct _ioeventfd {
        struct list_head     list;
        u64                  addr;
        int                  length;
        struct eventfd_ctx  *eventfd;
        u64                  datamatch;
        struct kvm_io_device dev;
        u8                   bus_idx;
        bool                 wildcard;
};
static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
        return container_of(dev, struct _ioeventfd, dev);
}
static void
ioeventfd_release(struct _ioeventfd *p)
{
        eventfd_ctx_put(p->eventfd);
        list_del(&p->list);
        kfree(p);
}
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
        u64 _val;

        if (addr != p->addr)
                /* address must be precise for a hit */
                return false;

        if (!p->length)
                /* length = 0 means only look at the address, so always a hit */
                return true;

        if (len != p->length)
                /* address-range must be precise for a hit */
                return false;

        if (p->wildcard)
                /* all else equal, wildcard is always a hit */
                return true;

        /* otherwise, we have to actually compare the data */

        BUG_ON(!IS_ALIGNED((unsigned long)val, len));

        switch (len) {
        case 1:
                _val = *(u8 *)val;
                break;
        case 2:
                _val = *(u16 *)val;
                break;
        case 4:
                _val = *(u32 *)val;
                break;
        case 8:
                _val = *(u64 *)val;
                break;
        default:
                return false;
        }

        return _val == p->datamatch ? true : false;
}
/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
                int len, const void *val)
{
        struct _ioeventfd *p = to_ioeventfd(this);

        if (!ioeventfd_in_range(p, addr, len, val))
                return -EOPNOTSUPP;

        eventfd_signal(p->eventfd, 1);
        return 0;
}
/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
        struct _ioeventfd *p = to_ioeventfd(this);

        ioeventfd_release(p);
}
static const struct kvm_io_device_ops ioeventfd_ops = {
        .write      = ioeventfd_write,
        .destructor = ioeventfd_destructor,
};
/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
        struct _ioeventfd *_p;

        list_for_each_entry(_p, &kvm->ioeventfds, list)
                if (_p->bus_idx == p->bus_idx &&
                    _p->addr == p->addr &&
                    (!_p->length || !p->length ||
                     (_p->length == p->length &&
                      (_p->wildcard || p->wildcard ||
                       _p->datamatch == p->datamatch))))
                        return true;

        return false;
}
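
/*
 * In words, the test above: two registrations collide when they sit on
 * the same bus at the same address and cannot be disambiguated, i.e.
 * either side ignores length, or the lengths are equal and at least one
 * side matches any value (wildcard) or both match the same datamatch.
 */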
static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
        if (flags & KVM_IOEVENTFD_FLAG_PIO)
                return KVM_PIO_BUS;
        if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
                return KVM_VIRTIO_CCW_NOTIFY_BUS;
        return KVM_MMIO_BUS;
}
static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
                                enum kvm_bus bus_idx,
                                struct kvm_ioeventfd *args)
{
        struct eventfd_ctx *eventfd;
        struct _ioeventfd *p;
        int ret;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p) {
                ret = -ENOMEM;
                goto fail;
        }

        INIT_LIST_HEAD(&p->list);
        p->addr    = args->addr;
        p->bus_idx = bus_idx;
        p->length  = args->len;
        p->eventfd = eventfd;

        /* The datamatch feature is optional, otherwise this is a wildcard */
        if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
                p->datamatch = args->datamatch;
        else
                p->wildcard = true;

        mutex_lock(&kvm->slots_lock);

        /* Verify that there isn't a match already */
        if (ioeventfd_check_collision(kvm, p)) {
                ret = -EEXIST;
                goto unlock_fail;
        }

        kvm_iodevice_init(&p->dev, &ioeventfd_ops);

        ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
                                      &p->dev);
        if (ret < 0)
                goto unlock_fail;

        kvm->buses[bus_idx]->ioeventfd_count++;
        list_add_tail(&p->list, &kvm->ioeventfds);

        mutex_unlock(&kvm->slots_lock);

        return 0;

unlock_fail:
        mutex_unlock(&kvm->slots_lock);

fail:
        kfree(p);
        eventfd_ctx_put(eventfd);

        return ret;
}
static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
                           struct kvm_ioeventfd *args)
{
        struct _ioeventfd *p, *tmp;
        struct eventfd_ctx *eventfd;
        int ret = -ENOENT;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        mutex_lock(&kvm->slots_lock);

        list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
                bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

                if (p->bus_idx != bus_idx ||
                    p->eventfd != eventfd  ||
                    p->addr != args->addr  ||
                    p->length != args->len ||
                    p->wildcard != wildcard)
                        continue;

                if (!p->wildcard && p->datamatch != args->datamatch)
                        continue;

                kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
                kvm->buses[bus_idx]->ioeventfd_count--;
                ioeventfd_release(p);
                ret = 0;
                break;
        }

        mutex_unlock(&kvm->slots_lock);

        eventfd_ctx_put(eventfd);

        return ret;
}
static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
        int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

        if (!args->len && bus_idx == KVM_MMIO_BUS)
                kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

        return ret;
}
static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        enum kvm_bus bus_idx;
        int ret;

        bus_idx = ioeventfd_bus_from_flags(args->flags);

        /* must be natural-word sized, or 0 to ignore length */
        switch (args->len) {
        case 0:
        case 1:
        case 2:
        case 4:
        case 8:
                break;
        default:
                return -EINVAL;
        }

        /* check for range overflow */
        if (args->addr + args->len < args->addr)
                return -EINVAL;

        /* check for extra flags that we don't understand */
        if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
                return -EINVAL;

        /* ioeventfd with no length can't be combined with DATAMATCH */
        if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
                return -EINVAL;

        ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
        if (ret)
                goto fail;

        /*
         * When length is ignored, MMIO is also put on a separate bus, for
         * faster lookups.
         */
        if (!args->len && bus_idx == KVM_MMIO_BUS) {
                ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
                if (ret < 0)
                        goto fast_fail;
        }

        return 0;

fast_fail:
        kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
        return ret;
}
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
                return kvm_deassign_ioeventfd(kvm, args);

        return kvm_assign_ioeventfd(kvm, args);
}