VFIO: platform: populate the reset function on probe
[firefly-linux-kernel-4.4.55.git] drivers/vfio/platform/vfio_platform_common.c
/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_platform_private.h"

static DEFINE_MUTEX(driver_lock);

static const struct vfio_platform_reset_combo reset_lookup_table[] = {
};
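
/*
 * The table above is deliberately empty at this point; follow-up patches
 * populate it with per-device reset handlers. A hypothetical entry (the
 * names below are illustrative, not taken from this tree) would look like:
 *
 *	{
 *		.compat = "vendor,some-device",
 *		.module_name = "vfio-platform-somedevice",
 *		.reset_function_name = "vfio_platform_somedevice_reset",
 *	},
 */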

static void vfio_platform_get_reset(struct vfio_platform_device *vdev,
				    struct device *dev)
{
	const char *compat;
	int (*reset)(struct vfio_platform_device *);
	int ret, i;

	ret = device_property_read_string(dev, "compatible", &compat);
	if (ret)
		return;

	for (i = 0; i < ARRAY_SIZE(reset_lookup_table); i++) {
		if (!strcmp(reset_lookup_table[i].compat, compat)) {
			request_module(reset_lookup_table[i].module_name);
			reset = __symbol_get(
				reset_lookup_table[i].reset_function_name);
			if (reset) {
				vdev->reset = reset;
				return;
			}
		}
	}
}
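
/*
 * A successful __symbol_get() in vfio_platform_get_reset() takes a
 * reference on the module providing the reset handler, so that module
 * cannot be unloaded while a device points at it; vfio_platform_put_reset()
 * drops the reference again via symbol_put_addr().
 */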

static void vfio_platform_put_reset(struct vfio_platform_device *vdev)
{
	if (vdev->reset)
		symbol_put_addr(vdev->reset);
}

static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i;

	while (vdev->get_resource(vdev, cnt))
		cnt++;

	vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
				GFP_KERNEL);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct resource *res =
			vdev->get_resource(vdev, i);

		if (!res)
			goto err;

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].flags = 0;

		switch (resource_type(res)) {
		case IORESOURCE_MEM:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
			if (!(res->flags & IORESOURCE_READONLY))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_WRITE;

			/*
			 * Only regions addressed with PAGE granularity may be
			 * MMAPed securely.
			 */
			if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
					!(vdev->regions[i].size & ~PAGE_MASK))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;

			break;
		case IORESOURCE_IO:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
			break;
		default:
			goto err;
		}
	}

	vdev->num_regions = cnt;

	return 0;
err:
	kfree(vdev->regions);
	return -EINVAL;
}

static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
{
	int i;

	for (i = 0; i < vdev->num_regions; i++)
		iounmap(vdev->regions[i].ioaddr);

	vdev->num_regions = 0;
	kfree(vdev->regions);
}
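
/*
 * Note that regions are not ioremap()ed here: the first kernel-mediated
 * read() or write() on a region maps it lazily (see
 * vfio_platform_{read,write}_mmio()), and vfio_platform_regions_cleanup()
 * unmaps whatever got mapped when the last user goes away.
 */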

static void vfio_platform_release(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		if (vdev->reset)
			vdev->reset(vdev);
		vfio_platform_regions_cleanup(vdev);
		vfio_platform_irq_cleanup(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(THIS_MODULE);
}

static int vfio_platform_open(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_platform_regions_init(vdev);
		if (ret)
			goto err_reg;

		ret = vfio_platform_irq_init(vdev);
		if (ret)
			goto err_irq;

		if (vdev->reset)
			vdev->reset(vdev);
	}

	vdev->refcnt++;

	mutex_unlock(&driver_lock);
	return 0;

err_irq:
	vfio_platform_regions_cleanup(vdev);
err_reg:
	mutex_unlock(&driver_lock);
	module_put(THIS_MODULE);
	return ret;
}
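
/*
 * Opens are reference counted under driver_lock: the first open sets up
 * regions and interrupts and, if a reset handler was found at probe time,
 * puts the device into a known state; the last release resets it again
 * before tearing everything down, so no stale device state leaks between
 * users.
 */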

static long vfio_platform_ioctl(void *device_data,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (vdev->reset)
			vdev->flags |= VFIO_DEVICE_FLAGS_RESET;
		info.flags = vdev->flags;
		info.num_regions = vdev->num_regions;
		info.num_irqs = vdev->num_irqs;

		/*
		 * copy_to_user() returns the number of uncopied bytes, not
		 * an errno, so map any failure to -EFAULT.
		 */
		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_regions)
			return -EINVAL;

		/* map the region index to an offset on the device fd */
		info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_irqs)
			return -EINVAL;

		info.flags = vdev->irqs[info.index].flags;
		info.count = vdev->irqs[info.index].count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		if (hdr.index >= vdev->num_irqs)
			return -EINVAL;

		if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			size_t size;

			if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
				size = sizeof(uint8_t);
			else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
				size = sizeof(int32_t);
			else
				return -EINVAL;

			if (hdr.argsz - minsz < size)
				return -EINVAL;

			data = memdup_user((void __user *)(arg + minsz), size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
						   hdr.start, hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		if (vdev->reset)
			return vdev->reset(vdev);
		else
			return -EINVAL;
	}

	return -ENOTTY;
}
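
/*
 * For reference, the userspace side of the VFIO_DEVICE_GET_INFO ioctl
 * handled above. A minimal sketch (device_fd is assumed to have been
 * obtained through the usual VFIO group handshake; error handling
 * abridged):
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *	if (ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info))
 *		err(1, "VFIO_DEVICE_GET_INFO");
 *	printf("flags %#x, %u regions, %u irqs\n",
 *	       info.flags, info.num_regions, info.num_irqs);
 */

/*
 * The read()/write() paths below split each transfer into the widest
 * naturally aligned MMIO accesses possible (4, then 2, then 1 byte), so
 * devices with register access-size restrictions stay usable through
 * kernel-mediated I/O.
 */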
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
				       char __user *buf, size_t count,
				       loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		/*
		 * Map lazily on first access. The region is passed by
		 * reference so the mapping is cached in vdev->regions[] and
		 * vfio_platform_regions_cleanup() can iounmap() it later.
		 */
		reg->ioaddr =
			ioremap_nocache(reg->addr, reg->size);

		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			val = ioread32(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 4))
				goto err;

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			val = ioread16(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 2))
				goto err;

			filled = 2;
		} else {
			u8 val;

			val = ioread8(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 1))
				goto err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}
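
/*
 * Region accesses are multiplexed on the device fd: the top bits of the
 * file offset select the region index and the low bits give the offset
 * within that region (VFIO_PLATFORM_OFFSET_TO_INDEX/_MASK).
 */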
static ssize_t vfio_platform_read(void *device_data, char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_read_mmio(&vdev->regions[index],
						buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
					const char __user *buf, size_t count,
					loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr =
			ioremap_nocache(reg->addr, reg->size);

		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, 4))
				goto err;
			iowrite32(val, reg->ioaddr + off);

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, 2))
				goto err;
			iowrite16(val, reg->ioaddr + off);

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, 1))
				goto err;
			iowrite8(val, reg->ioaddr + off);

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_write_mmio(&vdev->regions[index],
						buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}
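
/*
 * For mmap, vma->vm_pgoff arrives with the same encoding: region index in
 * the high bits, page offset within the region in the low bits. The index
 * bits are masked off here before remapping.
 */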
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
				   struct vm_area_struct *vma)
{
	u64 req_len, pgoff, req_start;

	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || req_start + req_len > region.size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}

static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= vdev->num_regions)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = vdev;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_mmap_mmio(vdev->regions[index], vma);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

static const struct vfio_device_ops vfio_platform_ops = {
	.name		= "vfio-platform",
	.open		= vfio_platform_open,
	.release	= vfio_platform_release,
	.ioctl		= vfio_platform_ioctl,
	.read		= vfio_platform_read,
	.write		= vfio_platform_write,
	.mmap		= vfio_platform_mmap,
};

int vfio_platform_probe_common(struct vfio_platform_device *vdev,
			       struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (!vdev)
		return -EINVAL;

	group = iommu_group_get(dev);
	if (!group) {
		pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
		return -EINVAL;
	}

	/*
	 * Finish initializing the device before registering it with the
	 * VFIO core; once vfio_add_group_dev() returns, userspace may open
	 * it at any time.
	 */
	vfio_platform_get_reset(vdev, dev);
	mutex_init(&vdev->igate);

	ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
	if (ret) {
		vfio_platform_put_reset(vdev);
		iommu_group_put(group);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_platform_probe_common);
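
/*
 * A bus-specific front end wires this up by filling in the vdev callbacks
 * and then calling the common probe. Minimal sketch along the lines of the
 * platform bus driver (names abridged, error handling omitted):
 *
 *	static int vfio_platform_probe(struct platform_device *pdev)
 *	{
 *		struct vfio_platform_device *vdev;
 *
 *		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
 *		vdev->opaque = (void *) pdev;
 *		vdev->name = pdev->name;
 *		vdev->get_resource = get_platform_resource;
 *		vdev->get_irq = get_platform_irq;
 *
 *		return vfio_platform_probe_common(vdev, &pdev->dev);
 *	}
 */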

struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
{
	struct vfio_platform_device *vdev;

	vdev = vfio_del_group_dev(dev);

	if (vdev) {
		vfio_platform_put_reset(vdev);
		iommu_group_put(dev->iommu_group);
	}

	return vdev;
}
EXPORT_SYMBOL_GPL(vfio_platform_remove_common);