video: rockchip: rkvdec: add drm&ion allocator support
[firefly-linux-kernel-4.4.55.git] / drivers / video / rockchip / vcodec / vcodec_iommu_drm.c
1 /*
2  * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
3  * author: Jung Zhao jung.zhao@rock-chips.com
4  *         Randy Li, randy.li@rock-chips.com
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16 #include <linux/dma-iommu.h>
17
18 #include <linux/dma-buf.h>
19 #include <drm/drmP.h>
20 #include <drm/drm_atomic.h>
21 #include <drm/drm_crtc_helper.h>
22 #include <drm/drm_fb_helper.h>
23 #include <drm/drm_sync_helper.h>
24 #include <drm/rockchip_drm.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/rockchip-iovmm.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/memblock.h>
29 #include <linux/module.h>
30 #include <linux/of_address.h>
31 #include <linux/of_graph.h>
32 #include <linux/component.h>
33 #include <linux/fence.h>
34 #include <linux/console.h>
35 #include <linux/kref.h>
36 #include <linux/fdtable.h>
37
38 #include "vcodec_iommu_ops.h"
39
/*
 * Per-buffer bookkeeping for a dma-buf imported into a vcodec session.
 * One instance lives on session_info->buffer_list for each imported fd.
 */
struct vcodec_drm_buffer {
	struct list_head list;		/* link in session buffer_list */
	struct dma_buf *dma_buf;	/* imported buffer; refs held while listed */
	union {
		unsigned long iova;	/* device address when behind an iommu */
		unsigned long phys;	/* physical address otherwise */
	};
	void *cpu_addr;			/* kernel vmap of the buffer, or NULL */
	unsigned long size;		/* buffer size in bytes */
	int fd;				/* userspace fd the buffer came from */
	int index;			/* session-local handle handed to callers */
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;		/* mapped scatterlist of the attachment */
	struct page **pages;		/* page array backing the cpu_addr vmap */
	struct kref ref;		/* map refcount; released by vcodec_drm_clear_map() */
	struct vcodec_iommu_session_info *session_info;
};
57
/* Per-device iommu state for the drm allocator backend. */
struct vcodec_iommu_drm_info {
	struct iommu_domain *domain;	/* domain the vcodec device attaches to */
	bool attached;			/* true while the device is attached */
};
62
63 static struct vcodec_drm_buffer *
64 vcodec_drm_get_buffer_no_lock(struct vcodec_iommu_session_info *session_info,
65                               int idx)
66 {
67         struct vcodec_drm_buffer *drm_buffer = NULL, *n;
68
69         list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
70                                  list) {
71                 if (drm_buffer->index == idx)
72                         return drm_buffer;
73         }
74
75         return NULL;
76 }
77
78 static struct vcodec_drm_buffer *
79 vcodec_drm_get_buffer_fd_no_lock(struct vcodec_iommu_session_info *session_info,
80                                  int fd)
81 {
82         struct vcodec_drm_buffer *drm_buffer = NULL, *n;
83
84         list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
85                                  list) {
86                 if (drm_buffer->fd == fd)
87                         return drm_buffer;
88         }
89
90         return NULL;
91 }
92
93 static void vcodec_drm_detach(struct vcodec_iommu_info *iommu_info)
94 {
95         struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
96         struct device *dev = iommu_info->dev;
97         struct iommu_domain *domain = drm_info->domain;
98
99         mutex_lock(&iommu_info->iommu_mutex);
100
101         if (!drm_info->attached) {
102                 mutex_unlock(&iommu_info->iommu_mutex);
103                 return;
104         }
105
106         iommu_detach_device(domain, dev);
107         drm_info->attached = false;
108
109         mutex_unlock(&iommu_info->iommu_mutex);
110 }
111
/*
 * Attach the vcodec device to its iommu domain and install the dma ops
 * for that domain.  Caller must hold iommu_info->iommu_mutex; the
 * drm_info->attached flag is NOT updated here — the caller owns it.
 * Returns 0 on success or a negative errno.
 */
static int vcodec_drm_attach_unlock(struct vcodec_iommu_info *iommu_info)
{
	struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
	struct device *dev = iommu_info->dev;
	struct iommu_domain *domain = drm_info->domain;
	int ret = 0;

	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	ret = iommu_attach_device(domain, dev);
	if (ret) {
		dev_err(dev, "Failed to attach iommu device\n");
		return ret;
	}

	/*
	 * IOVA window base 0x10000000, size SZ_2G — presumably chosen to
	 * keep device addresses out of the low range; TODO confirm against
	 * the rockchip iommu driver.
	 */
	if (!common_iommu_setup_dma_ops(dev, 0x10000000, SZ_2G, domain->ops)) {
		dev_err(dev, "Failed to set dma_ops\n");
		/* undo the attach so the device is left in a clean state */
		iommu_detach_device(domain, dev);
		ret = -ENODEV;
	}

	return ret;
}
138
139 static int vcodec_drm_attach(struct vcodec_iommu_info *iommu_info)
140 {
141         struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
142         int ret;
143
144         mutex_lock(&iommu_info->iommu_mutex);
145
146         if (drm_info->attached) {
147                 mutex_unlock(&iommu_info->iommu_mutex);
148                 return 0;
149         }
150
151         ret = vcodec_drm_attach_unlock(iommu_info);
152         if (ret) {
153                 mutex_unlock(&iommu_info->iommu_mutex);
154                 return ret;
155         }
156
157         drm_info->attached = true;
158
159         mutex_unlock(&iommu_info->iommu_mutex);
160
161         return ret;
162 }
163
/*
 * Build a page array from the buffer's sg table and vmap it into the
 * kernel with a noncached protection.  Returns the kernel virtual
 * address, or NULL on allocation failure.  The page array is kept in
 * drm_buffer->pages and freed by vcodec_drm_sgt_unmap_kernel().
 *
 * NOTE(review): the fill loop assumes every sg entry length is a whole
 * multiple of PAGE_SIZE and that the entries together cover
 * drm_buffer->size; otherwise fewer than nr_pages slots get filled —
 * TODO confirm for the dma-bufs handed to this driver.
 */
static void *vcodec_drm_sgt_map_kernel(struct vcodec_drm_buffer *drm_buffer)
{
	struct vcodec_iommu_session_info *session_info =
		drm_buffer->session_info;
	struct device *dev = session_info->dev;
	struct scatterlist *sgl, *sg;
	int nr_pages = PAGE_ALIGN(drm_buffer->size) >> PAGE_SHIFT;
	int i = 0, j = 0, k = 0;
	struct page *page;

	drm_buffer->pages = kmalloc_array(nr_pages, sizeof(*drm_buffer->pages),
					  GFP_KERNEL);
	if (!(drm_buffer->pages)) {
		dev_err(dev, "drm map can not alloc pages\n");

		return NULL;
	}

	sgl = drm_buffer->sgt->sgl;

	/* Expand each contiguous sg chunk into individual page pointers. */
	for_each_sg(sgl, sg, drm_buffer->sgt->nents, i) {
		page = sg_page(sg);
		for (j = 0; j < sg->length / PAGE_SIZE; j++)
			drm_buffer->pages[k++] = page++;
	}

	return vmap(drm_buffer->pages, nr_pages, VM_MAP,
		    pgprot_noncached(PAGE_KERNEL));
}
193
/*
 * Tear down the kernel mapping created by vcodec_drm_sgt_map_kernel():
 * unmap the vmap area and free the cached page array.
 */
static void vcodec_drm_sgt_unmap_kernel(struct vcodec_drm_buffer *drm_buffer)
{
	vunmap(drm_buffer->cpu_addr);
	kfree(drm_buffer->pages);
}
199
200 static void vcodec_drm_clear_map(struct kref *ref)
201 {
202         struct vcodec_drm_buffer *drm_buffer =
203                 container_of(ref, struct vcodec_drm_buffer, ref);
204         struct vcodec_iommu_session_info *session_info =
205                 drm_buffer->session_info;
206         struct vcodec_iommu_info *iommu_info = session_info->iommu_info;
207         struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
208         struct device *dev = session_info->dev;
209         struct iommu_domain *domain = drm_info->domain;
210
211         mutex_lock(&iommu_info->iommu_mutex);
212         drm_info = session_info->iommu_info->private;
213         if (!drm_info->attached) {
214                 if (vcodec_drm_attach_unlock(session_info->iommu_info))
215                         dev_err(dev, "can't clea map, attach iommu failed.\n");
216         }
217
218         if (drm_buffer->cpu_addr) {
219                 vcodec_drm_sgt_unmap_kernel(drm_buffer);
220                 drm_buffer->cpu_addr = NULL;
221         }
222
223         if (drm_buffer->attach) {
224                 dma_buf_unmap_attachment(drm_buffer->attach, drm_buffer->sgt,
225                                          DMA_BIDIRECTIONAL);
226                 dma_buf_detach(drm_buffer->dma_buf, drm_buffer->attach);
227                 dma_buf_put(drm_buffer->dma_buf);
228                 drm_buffer->attach = NULL;
229         }
230
231         if (!drm_info->attached)
232                 iommu_detach_device(domain, dev);
233
234         mutex_unlock(&iommu_info->iommu_mutex);
235 }
236
/*
 * Debug helper: print every buffer still held by the session.
 * (The function name carries a historical "vcdoec" typo; it is kept
 * because drm_ops references it by this exact name.)
 */
static void vcdoec_drm_dump_info(struct vcodec_iommu_session_info *session_info)
{
	struct vcodec_drm_buffer *drm_buffer = NULL, *n;

	vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_OPS_DUMP,
			"still there are below buffers stored in list\n");
	list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
				 list) {
		vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_OPS_DUMP,
				"index %d drm_buffer fd %d cpu_addr %p\n",
				drm_buffer->index,
				drm_buffer->fd, drm_buffer->cpu_addr);
	}
}
251
/*
 * Remove a buffer from the session list once all of its maps are gone.
 * Returns -EINVAL if the index is unknown, 0 otherwise.
 *
 * NOTE(review): the entry is only freed when the kref has already
 * dropped to zero; with maps still outstanding this silently returns 0
 * and leaves the buffer on the list — confirm callers expect that.
 */
static int vcodec_drm_free(struct vcodec_iommu_session_info *session_info,
			   int idx)
{
	struct device *dev = session_info->dev;
	/* please double-check all maps have been release */
	struct vcodec_drm_buffer *drm_buffer;

	mutex_lock(&session_info->list_mutex);
	drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);

	if (!drm_buffer) {
		dev_err(dev, "can not find %d buffer in list\n", idx);
		mutex_unlock(&session_info->list_mutex);

		return -EINVAL;
	}

	if (atomic_read(&drm_buffer->ref.refcount) == 0) {
		/* drop the reference taken by dma_buf_get() at import time */
		dma_buf_put(drm_buffer->dma_buf);
		list_del_init(&drm_buffer->list);
		kfree(drm_buffer);
	}
	mutex_unlock(&session_info->list_mutex);

	return 0;
}
278
279 static int
280 vcodec_drm_unmap_iommu(struct vcodec_iommu_session_info *session_info,
281                        int idx)
282 {
283         struct device *dev = session_info->dev;
284         struct vcodec_drm_buffer *drm_buffer;
285
286         /* Force to flush iommu table */
287         if (of_machine_is_compatible("rockchip,rk3288"))
288                 rockchip_iovmm_invalidate_tlb(session_info->mmu_dev);
289
290         mutex_lock(&session_info->list_mutex);
291         drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
292         mutex_unlock(&session_info->list_mutex);
293
294         if (!drm_buffer) {
295                 dev_err(dev, "can not find %d buffer in list\n", idx);
296                 return -EINVAL;
297         }
298
299         kref_put(&drm_buffer->ref, vcodec_drm_clear_map);
300
301         return 0;
302 }
303
304 static int vcodec_drm_map_iommu(struct vcodec_iommu_session_info *session_info,
305                                 int idx,
306                                 unsigned long *iova,
307                                 unsigned long *size)
308 {
309         struct device *dev = session_info->dev;
310         struct vcodec_drm_buffer *drm_buffer;
311
312         /* Force to flush iommu table */
313         if (of_machine_is_compatible("rockchip,rk3288"))
314                 rockchip_iovmm_invalidate_tlb(session_info->mmu_dev);
315
316         mutex_lock(&session_info->list_mutex);
317         drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
318         mutex_unlock(&session_info->list_mutex);
319
320         if (!drm_buffer) {
321                 dev_err(dev, "can not find %d buffer in list\n", idx);
322                 return -EINVAL;
323         }
324
325         kref_get(&drm_buffer->ref);
326         if (iova)
327                 *iova = drm_buffer->iova;
328         if (size)
329                 *size = drm_buffer->size;
330         return 0;
331 }
332
333 static int
334 vcodec_drm_unmap_kernel(struct vcodec_iommu_session_info *session_info, int idx)
335 {
336         struct device *dev = session_info->dev;
337         struct vcodec_drm_buffer *drm_buffer;
338
339         mutex_lock(&session_info->list_mutex);
340         drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
341         mutex_unlock(&session_info->list_mutex);
342
343         if (!drm_buffer) {
344                 dev_err(dev, "can not find %d buffer in list\n", idx);
345
346                 return -EINVAL;
347         }
348
349         if (drm_buffer->cpu_addr) {
350                 vcodec_drm_sgt_unmap_kernel(drm_buffer);
351                 drm_buffer->cpu_addr = NULL;
352         }
353
354         kref_put(&drm_buffer->ref, vcodec_drm_clear_map);
355         return 0;
356 }
357
/*
 * Release a buffer identified by its dma-buf fd: drop the map
 * reference via vcodec_drm_unmap_iommu(), then free the list entry if
 * no maps remain.
 *
 * NOTE(review): list_mutex is dropped between the lookup and the later
 * dereference/free of drm_buffer — confirm a concurrent free on the
 * same session cannot race here.
 */
static int
vcodec_drm_free_fd(struct vcodec_iommu_session_info *session_info, int fd)
{
	struct device *dev = session_info->dev;
	/* please double-check all maps have been release */
	struct vcodec_drm_buffer *drm_buffer = NULL;

	mutex_lock(&session_info->list_mutex);
	drm_buffer = vcodec_drm_get_buffer_fd_no_lock(session_info, fd);

	if (!drm_buffer) {
		dev_err(dev, "can not find %d buffer in list\n", fd);
		mutex_unlock(&session_info->list_mutex);

		return -EINVAL;
	}
	mutex_unlock(&session_info->list_mutex);

	vcodec_drm_unmap_iommu(session_info, drm_buffer->index);

	mutex_lock(&session_info->list_mutex);
	/* free the entry only once the last map reference is gone */
	if (atomic_read(&drm_buffer->ref.refcount) == 0) {
		dma_buf_put(drm_buffer->dma_buf);
		list_del_init(&drm_buffer->list);
		kfree(drm_buffer);
	}
	mutex_unlock(&session_info->list_mutex);

	return 0;
}
388
/*
 * Drop every buffer still held by a session: release one map reference
 * and then free the list entry.  The _safe iterator is required
 * because vcodec_drm_free() may unlink and kfree() the current entry.
 *
 * NOTE(review): assumes at most one outstanding map reference per
 * buffer at teardown — confirm against the session lifecycle.
 */
static void
vcodec_drm_clear_session(struct vcodec_iommu_session_info *session_info)
{
	struct vcodec_drm_buffer *drm_buffer = NULL, *n;

	list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
				 list) {
		kref_put(&drm_buffer->ref, vcodec_drm_clear_map);
		vcodec_drm_free(session_info, drm_buffer->index);
	}
}
400
401 static void *
402 vcodec_drm_map_kernel(struct vcodec_iommu_session_info *session_info, int idx)
403 {
404         struct device *dev = session_info->dev;
405         struct vcodec_drm_buffer *drm_buffer;
406
407         mutex_lock(&session_info->list_mutex);
408         drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
409         mutex_unlock(&session_info->list_mutex);
410
411         if (!drm_buffer) {
412                 dev_err(dev, "can not find %d buffer in list\n", idx);
413                 return NULL;
414         }
415
416         if (!drm_buffer->cpu_addr)
417                 drm_buffer->cpu_addr =
418                         vcodec_drm_sgt_map_kernel(drm_buffer);
419
420         kref_get(&drm_buffer->ref);
421
422         return drm_buffer->cpu_addr;
423 }
424
425 static int vcodec_drm_import(struct vcodec_iommu_session_info *session_info,
426                              int fd)
427 {
428         struct vcodec_drm_buffer *drm_buffer = NULL, *n;
429         struct vcodec_iommu_info *iommu_info = session_info->iommu_info;
430         struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
431         struct device *dev = session_info->dev;
432         struct dma_buf_attachment *attach;
433         struct sg_table *sgt;
434         int ret = 0;
435
436         list_for_each_entry_safe(drm_buffer, n,
437                                  &session_info->buffer_list, list) {
438                 if (drm_buffer->fd == fd)
439                         return drm_buffer->index;
440         }
441
442         drm_buffer = kzalloc(sizeof(*drm_buffer), GFP_KERNEL);
443         if (!drm_buffer) {
444                 ret = -ENOMEM;
445                 return ret;
446         }
447
448         drm_buffer->dma_buf = dma_buf_get(fd);
449         if (IS_ERR(drm_buffer->dma_buf)) {
450                 ret = PTR_ERR(drm_buffer->dma_buf);
451                 kfree(drm_buffer);
452                 return ret;
453         }
454         drm_buffer->fd = fd;
455         drm_buffer->session_info = session_info;
456
457         kref_init(&drm_buffer->ref);
458
459         mutex_lock(&iommu_info->iommu_mutex);
460         drm_info = session_info->iommu_info->private;
461         if (!drm_info->attached) {
462                 ret = vcodec_drm_attach_unlock(session_info->iommu_info);
463                 if (ret)
464                         goto fail_out;
465         }
466
467         attach = dma_buf_attach(drm_buffer->dma_buf, dev);
468         if (IS_ERR(attach)) {
469                 ret = PTR_ERR(attach);
470                 goto fail_out;
471         }
472
473         get_dma_buf(drm_buffer->dma_buf);
474
475         sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
476         if (IS_ERR(sgt)) {
477                 ret = PTR_ERR(sgt);
478                 goto fail_detach;
479         }
480
481         drm_buffer->iova = sg_dma_address(sgt->sgl);
482         drm_buffer->size = drm_buffer->dma_buf->size;
483
484         drm_buffer->attach = attach;
485         drm_buffer->sgt = sgt;
486
487         mutex_unlock(&iommu_info->iommu_mutex);
488
489         INIT_LIST_HEAD(&drm_buffer->list);
490         mutex_lock(&session_info->list_mutex);
491         drm_buffer->index = session_info->max_idx;
492         list_add_tail(&drm_buffer->list, &session_info->buffer_list);
493         session_info->max_idx++;
494         if ((session_info->max_idx & 0xfffffff) == 0)
495                 session_info->max_idx = 0;
496         mutex_unlock(&session_info->list_mutex);
497
498         return drm_buffer->index;
499
500 fail_detach:
501         dev_err(dev, "dmabuf map attach failed\n");
502         dma_buf_detach(drm_buffer->dma_buf, attach);
503         dma_buf_put(drm_buffer->dma_buf);
504 fail_out:
505         kfree(drm_buffer);
506         mutex_unlock(&iommu_info->iommu_mutex);
507
508         return ret;
509 }
510
511 static int vcodec_drm_create(struct vcodec_iommu_info *iommu_info)
512 {
513         struct vcodec_iommu_drm_info *drm_info;
514         int ret;
515
516         iommu_info->private = kzalloc(sizeof(*drm_info),
517                                       GFP_KERNEL);
518         drm_info = iommu_info->private;
519         if (!drm_info)
520                 return -ENOMEM;
521
522         drm_info->domain = iommu_domain_alloc(&platform_bus_type);
523         drm_info->attached = false;
524         if (!drm_info->domain)
525                 return -ENOMEM;
526
527         ret = iommu_get_dma_cookie(drm_info->domain);
528         if (ret)
529                 goto err_free_domain;
530
531         vcodec_drm_attach(iommu_info);
532
533         return 0;
534
535 err_free_domain:
536         iommu_domain_free(drm_info->domain);
537
538         return ret;
539 }
540
/*
 * Tear down the drm backend: detach the device, release the dma cookie
 * and domain, and free the private state created by
 * vcodec_drm_create().  Always returns 0.
 */
static int vcodec_drm_destroy(struct vcodec_iommu_info *iommu_info)
{
	struct vcodec_iommu_drm_info *drm_info = iommu_info->private;

	vcodec_drm_detach(iommu_info);
	iommu_put_dma_cookie(drm_info->domain);
	iommu_domain_free(drm_info->domain);

	kfree(drm_info);
	iommu_info->private = NULL;

	return 0;
}
554
/* Operation table for the drm allocator backend. */
static struct vcodec_iommu_ops drm_ops = {
	.create = vcodec_drm_create,
	.import = vcodec_drm_import,
	.free = vcodec_drm_free,
	.free_fd = vcodec_drm_free_fd,
	.map_kernel = vcodec_drm_map_kernel,
	.unmap_kernel = vcodec_drm_unmap_kernel,
	.map_iommu = vcodec_drm_map_iommu,
	.unmap_iommu = vcodec_drm_unmap_iommu,
	.destroy = vcodec_drm_destroy,
	.dump = vcdoec_drm_dump_info,	/* NOTE(review): "vcdoec" typo in name */
	.attach = vcodec_drm_attach,
	.detach = vcodec_drm_detach,
	.clear = vcodec_drm_clear_session,
};
570
571 void vcodec_iommu_drm_set_ops(struct vcodec_iommu_info *iommu_info)
572 {
573         if (!iommu_info)
574                 return;
575         iommu_info->ops = &drm_ops;
576 }