/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_platform_private.h"

static DEFINE_MUTEX(driver_lock);

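/*
 * Lookup table matching a device's "compatible" string to the module and
 * exported symbol that implement a reset handler for that device.
 */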
static const struct vfio_platform_reset_combo reset_lookup_table[] = {
	{
		.compat = "calxeda,hb-xgmac",
		.reset_function_name = "vfio_platform_calxedaxgmac_reset",
		.module_name = "vfio-platform-calxedaxgmac",
	},
};

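/*
 * Find a reset handler for this device based on its "compatible" property,
 * request the module that provides it and take a reference on the symbol.
 */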
static void vfio_platform_get_reset(struct vfio_platform_device *vdev,
				    struct device *dev)
{
	const char *compat;
	int (*reset)(struct vfio_platform_device *);
	int ret, i;

	ret = device_property_read_string(dev, "compatible", &compat);
	if (ret)
		return;

	for (i = 0; i < ARRAY_SIZE(reset_lookup_table); i++) {
		if (!strcmp(reset_lookup_table[i].compat, compat)) {
			request_module(reset_lookup_table[i].module_name);
			reset = __symbol_get(
				reset_lookup_table[i].reset_function_name);
			if (reset) {
				vdev->reset = reset;
				return;
			}
		}
	}
}

static void vfio_platform_put_reset(struct vfio_platform_device *vdev)
{
	if (vdev->reset)
		symbol_put_addr(vdev->reset);
}

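/*
 * Walk the device's resources and build the VFIO region table. MMIO regions
 * are always readable and, unless marked read-only, writable; only regions
 * that start and end on page boundaries are advertised as mmap-capable.
 */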
static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i;

	while (vdev->get_resource(vdev, cnt))
		cnt++;

	vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
				GFP_KERNEL);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct resource *res =
			vdev->get_resource(vdev, i);

		if (!res)
			goto err;

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].flags = 0;

		switch (resource_type(res)) {
		case IORESOURCE_MEM:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
			if (!(res->flags & IORESOURCE_READONLY))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_WRITE;

			/*
			 * Only regions addressed with PAGE granularity may be
			 * MMAPed securely.
			 */
			if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
			    !(vdev->regions[i].size & ~PAGE_MASK))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
			break;
		case IORESOURCE_IO:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
			break;
		default:
			goto err;
		}
	}

	vdev->num_regions = cnt;

	return 0;
err:
	kfree(vdev->regions);
	return -EINVAL;
}

static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
{
	int i;

	for (i = 0; i < vdev->num_regions; i++)
		iounmap(vdev->regions[i].ioaddr);

	vdev->num_regions = 0;
	kfree(vdev->regions);
}

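/*
 * Release one open reference; when the last reference is dropped, reset the
 * device (if a reset handler is available) and tear down the region and
 * interrupt state.
 */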
static void vfio_platform_release(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		if (vdev->reset)
			vdev->reset(vdev);
		vfio_platform_regions_cleanup(vdev);
		vfio_platform_irq_cleanup(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(THIS_MODULE);
}

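/*
 * Take an open reference; the first open initializes the region and
 * interrupt state and resets the device if a reset handler is available.
 */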
static int vfio_platform_open(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_platform_regions_init(vdev);
		if (ret)
			goto err_reg;

		ret = vfio_platform_irq_init(vdev);
		if (ret)
			goto err_irq;

		if (vdev->reset)
			vdev->reset(vdev);
	}

	vdev->refcnt++;

	mutex_unlock(&driver_lock);
	return 0;

err_irq:
	vfio_platform_regions_cleanup(vdev);
err_reg:
	mutex_unlock(&driver_lock);
	module_put(THIS_MODULE);
	return ret;
}

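/*
 * Dispatch the VFIO device ioctls: report device, region and IRQ info,
 * configure interrupt signaling and trigger a device reset.
 */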
static long vfio_platform_ioctl(void *device_data,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (vdev->reset)
			vdev->flags |= VFIO_DEVICE_FLAGS_RESET;
		info.flags = vdev->flags;
		info.num_regions = vdev->num_regions;
		info.num_irqs = vdev->num_irqs;

		return copy_to_user((void __user *)arg, &info, minsz);

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_regions)
			return -EINVAL;

		/* map offset to the physical address */
		info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		return copy_to_user((void __user *)arg, &info, minsz);

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_irqs)
			return -EINVAL;

		info.flags = vdev->irqs[info.index].flags;
		info.count = vdev->irqs[info.index].count;

		return copy_to_user((void __user *)arg, &info, minsz);

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		if (hdr.index >= vdev->num_irqs)
			return -EINVAL;

		if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			size_t size;

			if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
				size = sizeof(uint8_t);
			else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
				size = sizeof(int32_t);
			else
				return -EINVAL;

			if (hdr.argsz - minsz < size)
				return -EINVAL;

			data = memdup_user((void __user *)(arg + minsz), size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
						   hdr.start, hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		if (vdev->reset)
			return vdev->reset(vdev);
		else
			return -EINVAL;
	}

	return -ENOTTY;
}

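/*
 * Read from an MMIO region on behalf of userspace, ioremap()ing the region
 * if it is not already mapped and using the widest naturally aligned access
 * size for each transfer.
 */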
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region reg,
				       char __user *buf, size_t count,
				       loff_t off)
{
	unsigned int done = 0;

	if (!reg.ioaddr) {
		reg.ioaddr =
			ioremap_nocache(reg.addr, reg.size);

		if (!reg.ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			val = ioread32(reg.ioaddr + off);
			if (copy_to_user(buf, &val, 4))
				goto err;

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			val = ioread16(reg.ioaddr + off);
			if (copy_to_user(buf, &val, 2))
				goto err;

			filled = 2;
		} else {
			u8 val;

			val = ioread8(reg.ioaddr + off);
			if (copy_to_user(buf, &val, 1))
				goto err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

static ssize_t vfio_platform_read(void *device_data, char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_read_mmio(vdev->regions[index],
					       buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

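/*
 * Write to an MMIO region on behalf of userspace, ioremap()ing the region
 * if it is not already mapped and using the widest naturally aligned access
 * size for each transfer.
 */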
static ssize_t vfio_platform_write_mmio(struct vfio_platform_region reg,
					const char __user *buf, size_t count,
					loff_t off)
{
	unsigned int done = 0;

	if (!reg.ioaddr) {
		reg.ioaddr =
			ioremap_nocache(reg.addr, reg.size);

		if (!reg.ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, 4))
				goto err;
			iowrite32(val, reg.ioaddr + off);

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, 2))
				goto err;
			iowrite16(val, reg.ioaddr + off);

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, 1))
				goto err;
			iowrite8(val, reg.ioaddr + off);

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_write_mmio(vdev->regions[index],
						buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

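/*
 * Map an MMIO region into the caller's address space as an uncached,
 * page-aligned mapping that may not extend past the end of the region.
 */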
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
				   struct vm_area_struct *vma)
{
	u64 req_len, pgoff, req_start;

	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || req_start + req_len > region.size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}

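/*
 * Validate an mmap request against the region table and the region's
 * advertised access flags before handing it to the MMIO mapper.
 */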
static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= vdev->num_regions)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = vdev;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_mmap_mmio(vdev->regions[index], vma);

	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

static const struct vfio_device_ops vfio_platform_ops = {
	.name		= "vfio-platform",
	.open		= vfio_platform_open,
	.release	= vfio_platform_release,
	.ioctl		= vfio_platform_ioctl,
	.read		= vfio_platform_read,
	.write		= vfio_platform_write,
	.mmap		= vfio_platform_mmap,
};

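/*
 * Common probe helper for the VFIO platform bus drivers: the device must
 * already belong to an IOMMU group before it can be exposed through VFIO.
 */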
int vfio_platform_probe_common(struct vfio_platform_device *vdev,
			       struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (!vdev)
		return -EINVAL;

	group = iommu_group_get(dev);
	if (!group) {
		pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
		return -EINVAL;
	}

	ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
	if (ret) {
		iommu_group_put(group);
		return ret;
	}

	vfio_platform_get_reset(vdev, dev);

	mutex_init(&vdev->igate);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_platform_probe_common);

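/*
 * Common remove helper: detach the device from VFIO and drop the references
 * to the reset handler and the IOMMU group taken at probe time.
 */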
struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
{
	struct vfio_platform_device *vdev;

	vdev = vfio_del_group_dev(dev);

	if (vdev) {
		vfio_platform_put_reset(vdev);
		iommu_group_put(dev->iommu_group);
	}

	return vdev;
}
EXPORT_SYMBOL_GPL(vfio_platform_remove_common);