VFIO: platform: add reset struct and lookup table
[firefly-linux-kernel-4.4.55.git] drivers/vfio/platform/vfio_platform_common.c
/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_platform_private.h"

static DEFINE_MUTEX(driver_lock);

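/*
 * Lookup table matching devices to their reset handlers.  It is empty at
 * this point and is meant to be filled with entries by follow-up patches
 * (see struct vfio_platform_reset_combo in vfio_platform_private.h).
 */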
static const struct vfio_platform_reset_combo reset_lookup_table[] = {
};

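/*
 * Enumerate the device's resources and describe each one as a VFIO region:
 * base address, size and access flags.  MMIO regions that start and end on
 * a page boundary are additionally marked as mmap-capable.
 */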
static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
        int cnt = 0, i;

        while (vdev->get_resource(vdev, cnt))
                cnt++;

        vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
                                GFP_KERNEL);
        if (!vdev->regions)
                return -ENOMEM;

        for (i = 0; i < cnt; i++) {
                struct resource *res =
                        vdev->get_resource(vdev, i);

                if (!res)
                        goto err;

                vdev->regions[i].addr = res->start;
                vdev->regions[i].size = resource_size(res);
                vdev->regions[i].flags = 0;

                switch (resource_type(res)) {
                case IORESOURCE_MEM:
                        vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
                        vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
                        if (!(res->flags & IORESOURCE_READONLY))
                                vdev->regions[i].flags |=
                                        VFIO_REGION_INFO_FLAG_WRITE;

                        /*
                         * Only regions addressed with PAGE granularity may be
                         * MMAPed securely.
                         */
                        if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
                                        !(vdev->regions[i].size & ~PAGE_MASK))
                                vdev->regions[i].flags |=
                                        VFIO_REGION_INFO_FLAG_MMAP;

                        break;
                case IORESOURCE_IO:
                        vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
                        break;
                default:
                        goto err;
                }
        }

        vdev->num_regions = cnt;

        return 0;
err:
        kfree(vdev->regions);
        return -EINVAL;
}

static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
{
        int i;

        for (i = 0; i < vdev->num_regions; i++)
                iounmap(vdev->regions[i].ioaddr);

        vdev->num_regions = 0;
        kfree(vdev->regions);
}

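/*
 * Region and interrupt state is created on the first open of the device fd
 * and torn down again when the last user calls release.
 */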
static void vfio_platform_release(void *device_data)
{
        struct vfio_platform_device *vdev = device_data;

        mutex_lock(&driver_lock);

        if (!(--vdev->refcnt)) {
                vfio_platform_regions_cleanup(vdev);
                vfio_platform_irq_cleanup(vdev);
        }

        mutex_unlock(&driver_lock);

        module_put(THIS_MODULE);
}

static int vfio_platform_open(void *device_data)
{
        struct vfio_platform_device *vdev = device_data;
        int ret;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        mutex_lock(&driver_lock);

        if (!vdev->refcnt) {
                ret = vfio_platform_regions_init(vdev);
                if (ret)
                        goto err_reg;

                ret = vfio_platform_irq_init(vdev);
                if (ret)
                        goto err_irq;
        }

        vdev->refcnt++;

        mutex_unlock(&driver_lock);
        return 0;

err_irq:
        vfio_platform_regions_cleanup(vdev);
err_reg:
        mutex_unlock(&driver_lock);
        module_put(THIS_MODULE);
        return ret;
}

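/*
 * Device ioctls: report device, region and interrupt info to user space and
 * forward VFIO_DEVICE_SET_IRQS to the interrupt handling code.
 */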
static long vfio_platform_ioctl(void *device_data,
                                unsigned int cmd, unsigned long arg)
{
        struct vfio_platform_device *vdev = device_data;
        unsigned long minsz;

        if (cmd == VFIO_DEVICE_GET_INFO) {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                info.flags = vdev->flags;
                info.num_regions = vdev->num_regions;
                info.num_irqs = vdev->num_irqs;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                struct vfio_region_info info;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                if (info.index >= vdev->num_regions)
                        return -EINVAL;

                /* encode the region index in the device fd offset */
                info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
                info.size = vdev->regions[info.index].size;
                info.flags = vdev->regions[info.index].flags;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                if (info.index >= vdev->num_irqs)
                        return -EINVAL;

                info.flags = vdev->irqs[info.index].flags;
                info.count = vdev->irqs[info.index].count;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_DEVICE_SET_IRQS) {
                struct vfio_irq_set hdr;
                u8 *data = NULL;
                int ret = 0;

                minsz = offsetofend(struct vfio_irq_set, count);

                if (copy_from_user(&hdr, (void __user *)arg, minsz))
                        return -EFAULT;

                if (hdr.argsz < minsz)
                        return -EINVAL;

                if (hdr.index >= vdev->num_irqs)
                        return -EINVAL;

                if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
                                  VFIO_IRQ_SET_ACTION_TYPE_MASK))
                        return -EINVAL;

                if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
                        size_t size;

                        if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
                                size = sizeof(uint8_t);
                        else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
                                size = sizeof(int32_t);
                        else
                                return -EINVAL;

                        if (hdr.argsz - minsz < size)
                                return -EINVAL;

                        data = memdup_user((void __user *)(arg + minsz), size);
                        if (IS_ERR(data))
                                return PTR_ERR(data);
                }

                mutex_lock(&vdev->igate);

                ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
                                                   hdr.start, hdr.count, data);
                mutex_unlock(&vdev->igate);
                kfree(data);

                return ret;

        } else if (cmd == VFIO_DEVICE_RESET)
                return -EINVAL;

        return -ENOTTY;
}

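/*
 * Map the region on first access, then service the read with the widest
 * naturally aligned MMIO accesses possible (4, 2 or 1 bytes at a time).
 */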
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
                                       char __user *buf, size_t count,
                                       loff_t off)
{
        unsigned int done = 0;

        if (!reg->ioaddr) {
                reg->ioaddr = ioremap_nocache(reg->addr, reg->size);

                if (!reg->ioaddr)
                        return -ENOMEM;
        }

        while (count) {
                size_t filled;

                if (count >= 4 && !(off % 4)) {
                        u32 val;

                        val = ioread32(reg->ioaddr + off);
                        if (copy_to_user(buf, &val, 4))
                                goto err;

                        filled = 4;
                } else if (count >= 2 && !(off % 2)) {
                        u16 val;

                        val = ioread16(reg->ioaddr + off);
                        if (copy_to_user(buf, &val, 2))
                                goto err;

                        filled = 2;
                } else {
                        u8 val;

                        val = ioread8(reg->ioaddr + off);
                        if (copy_to_user(buf, &val, 1))
                                goto err;

                        filled = 1;
                }

                count -= filled;
                done += filled;
                off += filled;
                buf += filled;
        }

        return done;
err:
        return -EFAULT;
}

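/*
 * The region index is encoded in the upper bits of the file offset (see
 * VFIO_PLATFORM_OFFSET_TO_INDEX); the lower bits give the offset within
 * the region.
 */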
static ssize_t vfio_platform_read(void *device_data, char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct vfio_platform_device *vdev = device_data;
        unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
        loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

        if (index >= vdev->num_regions)
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
                return -EINVAL;

        if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
                return vfio_platform_read_mmio(&vdev->regions[index],
                                               buf, count, off);
        else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
                return -EINVAL; /* not implemented */

        return -EINVAL;
}

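/*
 * Same strategy as the read path: map the region on first access and issue
 * the widest naturally aligned MMIO writes possible.
 */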
static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
                                        const char __user *buf, size_t count,
                                        loff_t off)
{
        unsigned int done = 0;

        if (!reg->ioaddr) {
                reg->ioaddr = ioremap_nocache(reg->addr, reg->size);

                if (!reg->ioaddr)
                        return -ENOMEM;
        }

        while (count) {
                size_t filled;

                if (count >= 4 && !(off % 4)) {
                        u32 val;

                        if (copy_from_user(&val, buf, 4))
                                goto err;
                        iowrite32(val, reg->ioaddr + off);

                        filled = 4;
                } else if (count >= 2 && !(off % 2)) {
                        u16 val;

                        if (copy_from_user(&val, buf, 2))
                                goto err;
                        iowrite16(val, reg->ioaddr + off);

                        filled = 2;
                } else {
                        u8 val;

                        if (copy_from_user(&val, buf, 1))
                                goto err;
                        iowrite8(val, reg->ioaddr + off);

                        filled = 1;
                }

                count -= filled;
                done += filled;
                off += filled;
                buf += filled;
        }

        return done;
err:
        return -EFAULT;
}

static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
                                   size_t count, loff_t *ppos)
{
        struct vfio_platform_device *vdev = device_data;
        unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
        loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

        if (index >= vdev->num_regions)
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
                return -EINVAL;

        if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
                return vfio_platform_write_mmio(&vdev->regions[index],
                                                buf, count, off);
        else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
                return -EINVAL; /* not implemented */

        return -EINVAL;
}

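/*
 * For mmap, the low bits of vm_pgoff select the page inside the region,
 * while the bits above VFIO_PLATFORM_OFFSET_SHIFT carry the region index.
 */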
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
                                   struct vm_area_struct *vma)
{
        u64 req_len, pgoff, req_start;

        req_len = vma->vm_end - vma->vm_start;
        pgoff = vma->vm_pgoff &
                ((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
        req_start = pgoff << PAGE_SHIFT;

        if (region.size < PAGE_SIZE || req_start + req_len > region.size)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               req_len, vma->vm_page_prot);
}

static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
{
        struct vfio_platform_device *vdev = device_data;
        unsigned int index;

        index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

        if (vma->vm_end < vma->vm_start)
                return -EINVAL;
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
        if (index >= vdev->num_regions)
                return -EINVAL;
        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;
        if (vma->vm_end & ~PAGE_MASK)
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
                        && (vma->vm_flags & VM_READ))
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
                        && (vma->vm_flags & VM_WRITE))
                return -EINVAL;

        vma->vm_private_data = vdev;

        if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
                return vfio_platform_mmap_mmio(vdev->regions[index], vma);
        else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
                return -EINVAL; /* not implemented */

        return -EINVAL;
}

static const struct vfio_device_ops vfio_platform_ops = {
        .name           = "vfio-platform",
        .open           = vfio_platform_open,
        .release        = vfio_platform_release,
        .ioctl          = vfio_platform_ioctl,
        .read           = vfio_platform_read,
        .write          = vfio_platform_write,
        .mmap           = vfio_platform_mmap,
};

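/*
 * Shared probe/remove helpers for the bus-specific drivers built on top of
 * this file: the device must belong to an IOMMU group before it can be
 * handed to user space through the VFIO core.
 */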
int vfio_platform_probe_common(struct vfio_platform_device *vdev,
                               struct device *dev)
{
        struct iommu_group *group;
        int ret;

        if (!vdev)
                return -EINVAL;

        group = iommu_group_get(dev);
        if (!group) {
                pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
                return -EINVAL;
        }

        ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
        if (ret) {
                iommu_group_put(group);
                return ret;
        }

        mutex_init(&vdev->igate);

        return 0;
}
EXPORT_SYMBOL_GPL(vfio_platform_probe_common);

struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
{
        struct vfio_platform_device *vdev;

        vdev = vfio_del_group_dev(dev);
        if (vdev)
                iommu_group_put(dev->iommu_group);

        return vdev;
}
EXPORT_SYMBOL_GPL(vfio_platform_remove_common);