/*
 * VFIO platform devices interrupt handling
 *
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
17 #include <linux/eventfd.h>
18 #include <linux/interrupt.h>
19 #include <linux/slab.h>
20 #include <linux/types.h>
21 #include <linux/vfio.h>
22 #include <linux/irq.h>
24 #include "vfio_platform_private.h"
26 static void vfio_platform_mask(struct vfio_platform_irq *irq_ctx)
30 spin_lock_irqsave(&irq_ctx->lock, flags);
32 if (!irq_ctx->masked) {
33 disable_irq_nosync(irq_ctx->hwirq);
34 irq_ctx->masked = true;
37 spin_unlock_irqrestore(&irq_ctx->lock, flags);
/*
 * virqfd callback: mask the IRQ whenever the user signals the mask eventfd.
 * Always reports success.
 */
static int vfio_platform_mask_handler(void *opaque, void *unused)
{
	struct vfio_platform_irq *irq_ctx = opaque;

	vfio_platform_mask(irq_ctx);

	return 0;
}
49 static int vfio_platform_set_irq_mask(struct vfio_platform_device *vdev,
50 unsigned index, unsigned start,
51 unsigned count, uint32_t flags,
54 if (start != 0 || count != 1)
57 if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
60 if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
61 int32_t fd = *(int32_t *)data;
64 return vfio_virqfd_enable((void *) &vdev->irqs[index],
65 vfio_platform_mask_handler,
67 &vdev->irqs[index].mask, fd);
69 vfio_virqfd_disable(&vdev->irqs[index].mask);
73 if (flags & VFIO_IRQ_SET_DATA_NONE) {
74 vfio_platform_mask(&vdev->irqs[index]);
76 } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
77 uint8_t mask = *(uint8_t *)data;
80 vfio_platform_mask(&vdev->irqs[index]);
86 static void vfio_platform_unmask(struct vfio_platform_irq *irq_ctx)
90 spin_lock_irqsave(&irq_ctx->lock, flags);
92 if (irq_ctx->masked) {
93 enable_irq(irq_ctx->hwirq);
94 irq_ctx->masked = false;
97 spin_unlock_irqrestore(&irq_ctx->lock, flags);
/*
 * virqfd callback: unmask the IRQ whenever the user signals the unmask
 * eventfd.  Always reports success.
 */
static int vfio_platform_unmask_handler(void *opaque, void *unused)
{
	struct vfio_platform_irq *irq_ctx = opaque;

	vfio_platform_unmask(irq_ctx);

	return 0;
}
109 static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
110 unsigned index, unsigned start,
111 unsigned count, uint32_t flags,
114 if (start != 0 || count != 1)
117 if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
120 if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
121 int32_t fd = *(int32_t *)data;
124 return vfio_virqfd_enable((void *) &vdev->irqs[index],
125 vfio_platform_unmask_handler,
127 &vdev->irqs[index].unmask,
130 vfio_virqfd_disable(&vdev->irqs[index].unmask);
134 if (flags & VFIO_IRQ_SET_DATA_NONE) {
135 vfio_platform_unmask(&vdev->irqs[index]);
137 } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
138 uint8_t unmask = *(uint8_t *)data;
141 vfio_platform_unmask(&vdev->irqs[index]);
147 static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
149 struct vfio_platform_irq *irq_ctx = dev_id;
153 spin_lock_irqsave(&irq_ctx->lock, flags);
155 if (!irq_ctx->masked) {
158 /* automask maskable interrupts */
159 disable_irq_nosync(irq_ctx->hwirq);
160 irq_ctx->masked = true;
163 spin_unlock_irqrestore(&irq_ctx->lock, flags);
165 if (ret == IRQ_HANDLED)
166 eventfd_signal(irq_ctx->trigger, 1);
171 static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
173 struct vfio_platform_irq *irq_ctx = dev_id;
175 eventfd_signal(irq_ctx->trigger, 1);
180 static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
181 int fd, irq_handler_t handler)
183 struct vfio_platform_irq *irq = &vdev->irqs[index];
184 struct eventfd_ctx *trigger;
188 free_irq(irq->hwirq, irq);
190 eventfd_ctx_put(irq->trigger);
194 if (fd < 0) /* Disable only */
197 irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
198 irq->hwirq, vdev->name);
202 trigger = eventfd_ctx_fdget(fd);
203 if (IS_ERR(trigger)) {
205 return PTR_ERR(trigger);
208 irq->trigger = trigger;
210 irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
211 ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
214 eventfd_ctx_put(trigger);
220 enable_irq(irq->hwirq);
225 static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
226 unsigned index, unsigned start,
227 unsigned count, uint32_t flags,
230 struct vfio_platform_irq *irq = &vdev->irqs[index];
231 irq_handler_t handler;
233 if (vdev->irqs[index].flags & VFIO_IRQ_INFO_AUTOMASKED)
234 handler = vfio_automasked_irq_handler;
236 handler = vfio_irq_handler;
238 if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
239 return vfio_set_trigger(vdev, index, -1, handler);
241 if (start != 0 || count != 1)
244 if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
245 int32_t fd = *(int32_t *)data;
247 return vfio_set_trigger(vdev, index, fd, handler);
250 if (flags & VFIO_IRQ_SET_DATA_NONE) {
251 handler(irq->hwirq, irq);
253 } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
254 uint8_t trigger = *(uint8_t *)data;
257 handler(irq->hwirq, irq);
263 int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
264 uint32_t flags, unsigned index, unsigned start,
265 unsigned count, void *data)
267 int (*func)(struct vfio_platform_device *vdev, unsigned index,
268 unsigned start, unsigned count, uint32_t flags,
271 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
272 case VFIO_IRQ_SET_ACTION_MASK:
273 func = vfio_platform_set_irq_mask;
275 case VFIO_IRQ_SET_ACTION_UNMASK:
276 func = vfio_platform_set_irq_unmask;
278 case VFIO_IRQ_SET_ACTION_TRIGGER:
279 func = vfio_platform_set_irq_trigger;
286 return func(vdev, index, start, count, flags, data);
289 int vfio_platform_irq_init(struct vfio_platform_device *vdev)
293 while (vdev->get_irq(vdev, cnt) >= 0)
296 vdev->irqs = kcalloc(cnt, sizeof(struct vfio_platform_irq), GFP_KERNEL);
300 for (i = 0; i < cnt; i++) {
301 int hwirq = vdev->get_irq(vdev, i);
306 spin_lock_init(&vdev->irqs[i].lock);
308 vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;
310 if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
311 vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
312 | VFIO_IRQ_INFO_AUTOMASKED;
314 vdev->irqs[i].count = 1;
315 vdev->irqs[i].hwirq = hwirq;
316 vdev->irqs[i].masked = false;
319 vdev->num_irqs = cnt;
327 void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
331 for (i = 0; i < vdev->num_irqs; i++)
332 vfio_set_trigger(vdev, i, -1, NULL);