/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_platform_private.h"
static DEFINE_MUTEX(driver_lock);
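/*
 * Build the region table for the device: walk the resources exposed by the
 * bus-specific get_resource() callback, record address/size/flags for each,
 * and mark page-aligned MMIO regions as mmap-capable.
 */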
static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
        int cnt = 0, i;

        while (vdev->get_resource(vdev, cnt))
                cnt++;

        vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
                                GFP_KERNEL);
        if (!vdev->regions)
                return -ENOMEM;

        for (i = 0; i < cnt; i++) {
                struct resource *res = vdev->get_resource(vdev, i);

                if (!res)
                        goto err;

                vdev->regions[i].addr = res->start;
                vdev->regions[i].size = resource_size(res);
                vdev->regions[i].flags = 0;

                switch (resource_type(res)) {
                case IORESOURCE_MEM:
                        vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
                        vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
                        if (!(res->flags & IORESOURCE_READONLY))
                                vdev->regions[i].flags |=
                                        VFIO_REGION_INFO_FLAG_WRITE;
                        /*
                         * Only regions addressed with PAGE granularity may be
                         * MMAPed securely.
                         */
                        if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
                            !(vdev->regions[i].size & ~PAGE_MASK))
                                vdev->regions[i].flags |=
                                        VFIO_REGION_INFO_FLAG_MMAP;
                        break;
                case IORESOURCE_IO:
                        vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
                        break;
                default:
                        goto err;
                }
        }

        vdev->num_regions = cnt;
        return 0;
err:
        kfree(vdev->regions);
        return -EINVAL;
}
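/* Unmap any regions that were ioremap'd on demand and drop the table. */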
static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
{
        int i;

        for (i = 0; i < vdev->num_regions; i++)
                iounmap(vdev->regions[i].ioaddr);

        vdev->num_regions = 0;
        kfree(vdev->regions);
}
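/*
 * Release callback for the VFIO device file: when the last user closes the
 * device, tear down the region and IRQ state built up at open time.
 */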
static void vfio_platform_release(void *device_data)
{
        struct vfio_platform_device *vdev = device_data;

        mutex_lock(&driver_lock);

        if (!(--vdev->refcnt)) {
                vfio_platform_regions_cleanup(vdev);
                vfio_platform_irq_cleanup(vdev);
        }

        mutex_unlock(&driver_lock);

        module_put(THIS_MODULE);
}
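/*
 * Open callback: the first user initializes the region table and the IRQ
 * state (vfio_platform_irq_init() lives in the companion IRQ code); further
 * opens only bump the reference count.
 */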
static int vfio_platform_open(void *device_data)
{
        struct vfio_platform_device *vdev = device_data;
        int ret;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        mutex_lock(&driver_lock);

        if (!vdev->refcnt) {
                ret = vfio_platform_regions_init(vdev);
                if (ret)
                        goto err_reg;

                ret = vfio_platform_irq_init(vdev);
                if (ret)
                        goto err_irq;
        }

        vdev->refcnt++;
        mutex_unlock(&driver_lock);
        return 0;

err_irq:
        vfio_platform_regions_cleanup(vdev);
err_reg:
        mutex_unlock(&driver_lock);
        module_put(THIS_MODULE);
        return ret;
}
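/*
 * Handle the VFIO device ioctls: VFIO_DEVICE_GET_INFO,
 * VFIO_DEVICE_GET_REGION_INFO, VFIO_DEVICE_GET_IRQ_INFO and
 * VFIO_DEVICE_SET_IRQS. VFIO_DEVICE_RESET is not implemented.
 *
 * Illustrative userspace usage (a sketch, not part of this driver):
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *	ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info);
 */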
static long vfio_platform_ioctl(void *device_data,
                                unsigned int cmd, unsigned long arg)
{
        struct vfio_platform_device *vdev = device_data;
        unsigned long minsz;

        if (cmd == VFIO_DEVICE_GET_INFO) {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                info.flags = vdev->flags;
                info.num_regions = vdev->num_regions;
                info.num_irqs = vdev->num_irqs;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;
        } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                struct vfio_region_info info;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                if (info.index >= vdev->num_regions)
                        return -EINVAL;

                /* map offset to the physical address */
                info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
                info.size = vdev->regions[info.index].size;
                info.flags = vdev->regions[info.index].flags;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;
        } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                if (info.index >= vdev->num_irqs)
                        return -EINVAL;

                info.flags = vdev->irqs[info.index].flags;
                info.count = vdev->irqs[info.index].count;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;
        } else if (cmd == VFIO_DEVICE_SET_IRQS) {
                struct vfio_irq_set hdr;
                u8 *data = NULL;
                int ret = 0;

                minsz = offsetofend(struct vfio_irq_set, count);

                if (copy_from_user(&hdr, (void __user *)arg, minsz))
                        return -EFAULT;

                if (hdr.argsz < minsz)
                        return -EINVAL;

                if (hdr.index >= vdev->num_irqs)
                        return -EINVAL;

                if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
                                  VFIO_IRQ_SET_ACTION_TYPE_MASK))
                        return -EINVAL;

                if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
                        size_t size;

                        if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
                                size = sizeof(uint8_t);
                        else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
                                size = sizeof(int32_t);
                        else
                                return -EINVAL;

                        if (hdr.argsz - minsz < size)
                                return -EINVAL;

                        data = memdup_user((void __user *)(arg + minsz), size);
                        if (IS_ERR(data))
                                return PTR_ERR(data);
                }

                mutex_lock(&vdev->igate);

                ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
                                                   hdr.start, hdr.count, data);
                mutex_unlock(&vdev->igate);
                kfree(data);

                return ret;

        } else if (cmd == VFIO_DEVICE_RESET)
                return -EINVAL; /* not implemented */

        return -ENOTTY;
}
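/*
 * Read from an MMIO region on behalf of userspace. The region is ioremap'd
 * lazily on first access; each iteration uses the widest naturally aligned
 * access (4, 2 or 1 bytes) that fits the remaining count.
 */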
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region reg,
                                       char __user *buf, size_t count,
                                       loff_t off)
{
        unsigned int done = 0;

        if (!reg.ioaddr) {
                reg.ioaddr = ioremap_nocache(reg.addr, reg.size);
                if (!reg.ioaddr)
                        return -ENOMEM;
        }

        while (count) {
                size_t filled;

                if (count >= 4 && !(off % 4)) {
                        u32 val = ioread32(reg.ioaddr + off);

                        if (copy_to_user(buf, &val, 4))
                                goto err;
                        filled = 4;
                } else if (count >= 2 && !(off % 2)) {
                        u16 val = ioread16(reg.ioaddr + off);

                        if (copy_to_user(buf, &val, 2))
                                goto err;
                        filled = 2;
                } else {
                        u8 val = ioread8(reg.ioaddr + off);

                        if (copy_to_user(buf, &val, 1))
                                goto err;
                        filled = 1;
                }

                count -= filled;
                done += filled;
                off += filled;
                buf += filled;
        }

        return done;
err:
        return -EFAULT;
}
static ssize_t vfio_platform_read(void *device_data, char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct vfio_platform_device *vdev = device_data;
        unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
        loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

        if (index >= vdev->num_regions)
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
                return -EINVAL;

        if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
                return vfio_platform_read_mmio(vdev->regions[index],
                                               buf, count, off);
        else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
                return -EINVAL; /* not implemented */

        return -EINVAL;
}
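/*
 * Write to an MMIO region on behalf of userspace, mirroring the read path:
 * lazy ioremap of the region, then the widest aligned access per iteration.
 */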
static ssize_t vfio_platform_write_mmio(struct vfio_platform_region reg,
                                        const char __user *buf, size_t count,
                                        loff_t off)
{
        unsigned int done = 0;

        if (!reg.ioaddr) {
                reg.ioaddr = ioremap_nocache(reg.addr, reg.size);
                if (!reg.ioaddr)
                        return -ENOMEM;
        }

        while (count) {
                size_t filled;

                if (count >= 4 && !(off % 4)) {
                        u32 val;

                        if (copy_from_user(&val, buf, 4))
                                goto err;
                        iowrite32(val, reg.ioaddr + off);
                        filled = 4;
                } else if (count >= 2 && !(off % 2)) {
                        u16 val;

                        if (copy_from_user(&val, buf, 2))
                                goto err;
                        iowrite16(val, reg.ioaddr + off);
                        filled = 2;
                } else {
                        u8 val;

                        if (copy_from_user(&val, buf, 1))
                                goto err;
                        iowrite8(val, reg.ioaddr + off);
                        filled = 1;
                }

                count -= filled;
                done += filled;
                off += filled;
                buf += filled;
        }

        return done;
err:
        return -EFAULT;
}
static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
                                   size_t count, loff_t *ppos)
{
        struct vfio_platform_device *vdev = device_data;
        unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
        loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

        if (index >= vdev->num_regions)
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
                return -EINVAL;

        if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
                return vfio_platform_write_mmio(vdev->regions[index],
                                                buf, count, off);
        else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
                return -EINVAL; /* not implemented */

        return -EINVAL;
}
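/*
 * Map an MMIO region into a user address space. The region index is encoded
 * in the upper bits of the file offset (see VFIO_PLATFORM_OFFSET_SHIFT), so
 * only the low page-offset bits are honoured here, and the mapping must fit
 * entirely within the region.
 */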
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
                                   struct vm_area_struct *vma)
{
        u64 req_len, pgoff, req_start;

        req_len = vma->vm_end - vma->vm_start;
        pgoff = vma->vm_pgoff &
                ((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
        req_start = pgoff << PAGE_SHIFT;

        if (region.size < PAGE_SIZE || req_start + req_len > region.size)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               req_len, vma->vm_page_prot);
}
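/*
 * mmap callback: validate the requested mapping against the region's MMAP,
 * READ and WRITE flags before handing off to the MMIO-specific helper.
 */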
static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
{
        struct vfio_platform_device *vdev = device_data;
        unsigned int index;

        index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

        if (vma->vm_end < vma->vm_start)
                return -EINVAL;
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
        if (index >= vdev->num_regions)
                return -EINVAL;
        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;
        if (vma->vm_end & ~PAGE_MASK)
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
                        && (vma->vm_flags & VM_READ))
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
                        && (vma->vm_flags & VM_WRITE))
                return -EINVAL;

        vma->vm_private_data = vdev;

        if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
                return vfio_platform_mmap_mmio(vdev->regions[index], vma);
        else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
                return -EINVAL; /* not implemented */

        return -EINVAL;
}
static const struct vfio_device_ops vfio_platform_ops = {
        .name           = "vfio-platform",
        .open           = vfio_platform_open,
        .release        = vfio_platform_release,
        .ioctl          = vfio_platform_ioctl,
        .read           = vfio_platform_read,
        .write          = vfio_platform_write,
        .mmap           = vfio_platform_mmap,
};
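/*
 * Common probe path used by the bus-specific front ends: the device must
 * belong to an IOMMU group before it can be handed to the VFIO core via
 * vfio_add_group_dev().
 */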
int vfio_platform_probe_common(struct vfio_platform_device *vdev,
                               struct device *dev)
{
        struct iommu_group *group;
        int ret;

        if (!vdev)
                return -EINVAL;

        group = iommu_group_get(dev);
        if (!group) {
                pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
                return -EINVAL;
        }

        ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
        if (ret) {
                iommu_group_put(group);
                return ret;
        }

        mutex_init(&vdev->igate);

        return 0;
}
EXPORT_SYMBOL_GPL(vfio_platform_probe_common);
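/*
 * Common removal path: detach the device from the VFIO core and drop the
 * IOMMU group reference taken at probe time.
 */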
struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
{
        struct vfio_platform_device *vdev;

        vdev = vfio_del_group_dev(dev);
        if (vdev)
                iommu_group_put(dev->iommu_group);

        return vdev;
}
EXPORT_SYMBOL_GPL(vfio_platform_remove_common);