/* drivers/gpu/drm/rockchip/rockchip_drm_rga.c */
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <drm/drmP.h>
#include <drm/rockchip_drm.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_rga.h"

#define RGA_MODE_BASE_REG               0x0100
#define RGA_MODE_MAX_REG                0x017C

#define RGA_SYS_CTRL                    0x0000
#define RGA_CMD_CTRL                    0x0004
#define RGA_CMD_BASE                    0x0008
#define RGA_INT                         0x0010
#define RGA_MMU_CTRL0                   0x0014
#define RGA_VERSION_INFO                0x0028

#define RGA_SRC_Y_RGB_BASE_ADDR         0x0108
#define RGA_SRC_CB_BASE_ADDR            0x010C
#define RGA_SRC_CR_BASE_ADDR            0x0110
#define RGA_SRC1_RGB_BASE_ADDR          0x0114
#define RGA_DST_Y_RGB_BASE_ADDR         0x013C
#define RGA_DST_CB_BASE_ADDR            0x0140
#define RGA_DST_CR_BASE_ADDR            0x014C
#define RGA_MMU_CTRL1                   0x016C
#define RGA_MMU_SRC_BASE                0x0170
#define RGA_MMU_SRC1_BASE               0x0174
#define RGA_MMU_DST_BASE                0x0178

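/*
 * Convert a u64 ioctl argument into a user pointer. On 64-bit kernels the
 * value is used as-is; on 32-bit kernels only the low 32 bits are
 * meaningful, so userspace can pass pointers in a fixed-width field
 * regardless of its own word size.
 */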
static void __user *rga_compat_ptr(u64 value)
{
#ifdef CONFIG_ARM64
        return (void __user *)(value);
#else
        return (void __user *)((u32)(value));
#endif
}

static inline void rga_write(struct rockchip_rga *rga, u32 reg, u32 value)
{
        writel(value, rga->regs + reg);
}

static inline u32 rga_read(struct rockchip_rga *rga, u32 reg)
{
        return readl(rga->regs + reg);
}

static inline void rga_mod(struct rockchip_rga *rga, u32 reg, u32 val, u32 mask)
{
        u32 temp = rga_read(rga, reg) & ~(mask);

        temp |= val & mask;
        rga_write(rga, reg, temp);
}

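/*
 * Enable the three RGA clocks (sclk, aclk, hclk), unwinding in reverse
 * order if any of them fails to enable.
 */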
static int rga_enable_clocks(struct rockchip_rga *rga)
{
        int ret;

        ret = clk_prepare_enable(rga->sclk);
        if (ret) {
                dev_err(rga->dev, "Cannot enable rga sclk: %d\n", ret);
                return ret;
        }

        ret = clk_prepare_enable(rga->aclk);
        if (ret) {
                dev_err(rga->dev, "Cannot enable rga aclk: %d\n", ret);
                goto err_disable_sclk;
        }

        ret = clk_prepare_enable(rga->hclk);
        if (ret) {
                dev_err(rga->dev, "Cannot enable rga hclk: %d\n", ret);
                goto err_disable_aclk;
        }

        return 0;

err_disable_aclk:
        clk_disable_unprepare(rga->aclk);
err_disable_sclk:
        clk_disable_unprepare(rga->sclk);

        return ret;
}

static void rga_disable_clocks(struct rockchip_rga *rga)
{
        clk_disable_unprepare(rga->hclk);
        clk_disable_unprepare(rga->aclk);
        clk_disable_unprepare(rga->sclk);
}

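/* Put every statically allocated cmdlist node onto the free list. */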
static void rga_init_cmdlist(struct rockchip_rga *rga)
{
        struct rga_cmdlist_node *node;
        int nr;

        node = rga->cmdlist_node;

        for (nr = 0; nr < ARRAY_SIZE(rga->cmdlist_node); nr++)
                list_add_tail(&node[nr].list, &rga->free_cmdlist);
}

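/*
 * Pack the register writes of every cmdlist node in the runqueue into one
 * DMA-coherent pool that the RGA command fetcher can read directly. Each
 * node gets a fixed-size slot holding its mode registers (relative to
 * RGA_MODE_BASE_REG) plus the physical bases of any MMU page tables.
 */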
static int rga_alloc_dma_buf_for_cmdlist(struct rga_runqueue_node *runqueue)
{
        struct list_head *run_cmdlist = &runqueue->run_cmdlist;
        struct device *dev = runqueue->dev;
        struct dma_attrs cmdlist_dma_attrs;
        struct rga_cmdlist_node *node;
        void *cmdlist_pool_virt;
        dma_addr_t cmdlist_pool;
        int cmdlist_cnt = 0;
        int count = 0;

        list_for_each_entry(node, run_cmdlist, list)
                cmdlist_cnt++;

        init_dma_attrs(&cmdlist_dma_attrs);
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &cmdlist_dma_attrs);

        /* Each cmdlist slot holds RGA_CMDLIST_SIZE 32-bit registers. */
        cmdlist_pool_virt = dma_alloc_attrs(dev,
                                            cmdlist_cnt * RGA_CMDLIST_SIZE * 4,
                                            &cmdlist_pool, GFP_KERNEL,
                                            &cmdlist_dma_attrs);
        if (!cmdlist_pool_virt) {
                dev_err(dev, "failed to allocate cmdlist dma memory\n");
                return -ENOMEM;
        }

        /*
         * Fill in the RGA operation registers from each cmdlist command
         * buffer, and also fill in the MMU TLB base information.
         */
        list_for_each_entry(node, run_cmdlist, list) {
                struct rga_cmdlist *cmdlist = &node->cmdlist;
                unsigned int mmu_ctrl = 0;
                unsigned int reg;
                u32 *dest;
                int i;

                dest = cmdlist_pool_virt + RGA_CMDLIST_SIZE * 4 * count++;

                for (i = 0; i < cmdlist->last / 2; i++) {
                        reg = (node->cmdlist.data[2 * i] - RGA_MODE_BASE_REG);
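                        /*
                         * Skip entries whose offset falls outside the mode
                         * register window; this also covers base addresses
                         * carrying RGA_BUF_TYPE_* flag bits, which are
                         * resolved through the MMU tables below instead.
                         */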
                        if (reg > RGA_MODE_BASE_REG)
                                continue;
                        dest[reg >> 2] = cmdlist->data[2 * i + 1];
                }

                if (cmdlist->src_mmu_pages) {
                        reg = RGA_MMU_SRC_BASE - RGA_MODE_BASE_REG;
                        dest[reg >> 2] =
                            virt_to_phys(cmdlist->src_mmu_pages) >> 4;
                        mmu_ctrl |= 0x7;
                }

                if (cmdlist->dst_mmu_pages) {
                        reg = RGA_MMU_DST_BASE - RGA_MODE_BASE_REG;
                        dest[reg >> 2] =
                            virt_to_phys(cmdlist->dst_mmu_pages) >> 4;
                        mmu_ctrl |= 0x7 << 8;
                }

                if (cmdlist->src1_mmu_pages) {
                        reg = RGA_MMU_SRC1_BASE - RGA_MODE_BASE_REG;
                        dest[reg >> 2] =
                            virt_to_phys(cmdlist->src1_mmu_pages) >> 4;
                        mmu_ctrl |= 0x7 << 4;
                }

                reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
                dest[reg >> 2] = mmu_ctrl;
        }

        dma_sync_single_for_device(dev, cmdlist_pool,
                                   cmdlist_cnt * RGA_CMDLIST_SIZE * 4,
                                   DMA_TO_DEVICE);

        runqueue->cmdlist_dma_attrs = cmdlist_dma_attrs;
        runqueue->cmdlist_pool_virt = cmdlist_pool_virt;
        runqueue->cmdlist_pool = cmdlist_pool;
        runqueue->cmdlist_cnt = cmdlist_cnt;

        return 0;
}

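/*
 * Validate every register offset supplied by userspace: apart from the
 * source/destination base-address slots (which may carry buffer-type
 * flags), offsets must be word-aligned and fall inside the mode register
 * window.
 */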
static int rga_check_reg_offset(struct device *dev,
                                struct rga_cmdlist_node *node)
{
        struct rga_cmdlist *cmdlist = &node->cmdlist;
        int index;
        int reg;
        int i;

        for (i = 0; i < cmdlist->last / 2; i++) {
                index = cmdlist->last - 2 * (i + 1);
                reg = cmdlist->data[index];

                switch (reg & 0xffff) {
                case RGA_DST_Y_RGB_BASE_ADDR:
                case RGA_SRC_Y_RGB_BASE_ADDR:
                case RGA_SRC1_RGB_BASE_ADDR:
                        break;
                default:
                        if (reg < RGA_MODE_BASE_REG || reg > RGA_MODE_MAX_REG)
                                goto err;

                        if (reg % 4)
                                goto err;
                }
        }

        return 0;

err:
        dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
        return -EINVAL;
}

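/*
 * Import a dma-buf by fd and flatten its scatter-gather list into a page
 * table for the RGA MMU: one 32-bit physical page address per entry. The
 * table is a fixed 32K allocation (8K entries), so callers are expected
 * to stay within 32M worth of pages per buffer.
 */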
static struct dma_buf_attachment *rga_gem_buf_to_pages(struct rockchip_rga *rga,
                                                       void **mmu_pages, int fd,
                                                       int flush)
{
        struct dma_buf_attachment *attach;
        struct dma_buf *dmabuf;
        struct sg_table *sgt;
        struct scatterlist *sgl;
        unsigned int mapped_size = 0;
        phys_addr_t address;
        unsigned int len;
        unsigned int i, p;
        unsigned int *pages;
        int ret;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR(dmabuf)) {
                dev_err(rga->dev, "Failed to get dma_buf with fd %d\n", fd);
                return ERR_PTR(-EINVAL);
        }

        attach = dma_buf_attach(dmabuf, rga->dev);
        if (IS_ERR(attach)) {
                dev_err(rga->dev, "Failed to attach dma_buf\n");
                ret = PTR_ERR(attach);
                goto failed_attach;
        }

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                dev_err(rga->dev, "Failed to map dma_buf attachment\n");
                ret = PTR_ERR(sgt);
                goto failed_detach;
        }

        /*
         * Allocate (2^3 * 4K) = 32K bytes for storing pages: 8K 32-bit
         * entries, which can cover 8K * 4K = 32M of RAM addresses.
         */
        pages = (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
        if (!pages) {
                ret = -ENOMEM;
                goto failed_unmap;
        }

        for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
                len = sg_dma_len(sgl) >> PAGE_SHIFT;
                address = sg_phys(sgl);

                for (p = 0; p < len; p++) {
                        dma_addr_t phys = address + (p << PAGE_SHIFT);

                        pages[mapped_size + p] = phys;
                }

                mapped_size += len;
        }

        if (flush)
                dma_sync_sg_for_device(rga->drm_dev->dev, sgt->sgl, sgt->nents,
                                       DMA_TO_DEVICE);

        dma_sync_single_for_device(rga->drm_dev->dev, virt_to_phys(pages),
                                   8 * PAGE_SIZE, DMA_TO_DEVICE);

        *mmu_pages = pages;

        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);

        return attach;

failed_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
failed_detach:
        dma_buf_detach(dmabuf, attach);
failed_attach:
        dma_buf_put(dmabuf);

        return ERR_PTR(ret);
}

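/*
 * Walk the userspace cmdlist and, for every source/destination base
 * address tagged RGA_BUF_TYPE_GEMFD, turn the accompanying dma-buf fd
 * into an attachment plus an RGA MMU page table.
 */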
static int rga_map_cmdlist_gem(struct rockchip_rga *rga,
                               struct rga_cmdlist_node *node,
                               struct drm_device *drm_dev,
                               struct drm_file *file)
{
        struct rga_cmdlist *cmdlist = &node->cmdlist;
        struct dma_buf_attachment *attach;
        void *mmu_pages;
        int fd;
        int i;

        for (i = 0; i < cmdlist->last / 2; i++) {
                int index = cmdlist->last - 2 * (i + 1);
                int flush = cmdlist->data[index] & RGA_BUF_TYPE_FLUSH;

                switch (cmdlist->data[index] & 0xffff) {
                case RGA_SRC1_RGB_BASE_ADDR:
                        if (cmdlist->data[index] & RGA_BUF_TYPE_GEMFD) {
                                fd = cmdlist->data[index + 1];
                                attach = rga_gem_buf_to_pages(rga, &mmu_pages,
                                                              fd, flush);
                                if (IS_ERR(attach))
                                        return PTR_ERR(attach);

                                cmdlist->src1_attach = attach;
                                cmdlist->src1_mmu_pages = mmu_pages;
                        }
                        break;
                case RGA_SRC_Y_RGB_BASE_ADDR:
                        if (cmdlist->data[index] & RGA_BUF_TYPE_GEMFD) {
                                fd = cmdlist->data[index + 1];
                                attach = rga_gem_buf_to_pages(rga, &mmu_pages,
                                                              fd, flush);
                                if (IS_ERR(attach))
                                        return PTR_ERR(attach);

                                cmdlist->src_attach = attach;
                                cmdlist->src_mmu_pages = mmu_pages;
                        }
                        break;
                case RGA_DST_Y_RGB_BASE_ADDR:
                        if (cmdlist->data[index] & RGA_BUF_TYPE_GEMFD) {
                                fd = cmdlist->data[index + 1];
                                attach = rga_gem_buf_to_pages(rga, &mmu_pages,
                                                              fd, flush);
                                if (IS_ERR(attach))
                                        return PTR_ERR(attach);

                                cmdlist->dst_attach = attach;
                                cmdlist->dst_mmu_pages = mmu_pages;
                        }
                        break;
                }
        }

        return 0;
}

static void rga_unmap_cmdlist_gem(struct rockchip_rga *rga,
                                  struct rga_cmdlist_node *node)
{
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        attach = node->cmdlist.src_attach;
        if (attach) {
                dma_buf = attach->dmabuf;
                dma_buf_detach(dma_buf, attach);
                dma_buf_put(dma_buf);
        }
        node->cmdlist.src_attach = NULL;

        attach = node->cmdlist.src1_attach;
        if (attach) {
                dma_buf = attach->dmabuf;
                dma_buf_detach(dma_buf, attach);
                dma_buf_put(dma_buf);
        }
        node->cmdlist.src1_attach = NULL;

        attach = node->cmdlist.dst_attach;
        if (attach) {
                dma_buf = attach->dmabuf;
                dma_buf_detach(dma_buf, attach);
                dma_buf_put(dma_buf);
        }
        node->cmdlist.dst_attach = NULL;

        if (node->cmdlist.src_mmu_pages)
                free_pages((unsigned long)node->cmdlist.src_mmu_pages, 3);
        node->cmdlist.src_mmu_pages = NULL;

        if (node->cmdlist.src1_mmu_pages)
                free_pages((unsigned long)node->cmdlist.src1_mmu_pages, 3);
        node->cmdlist.src1_mmu_pages = NULL;

        if (node->cmdlist.dst_mmu_pages)
                free_pages((unsigned long)node->cmdlist.dst_mmu_pages, 3);
        node->cmdlist.dst_mmu_pages = NULL;
}

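/*
 * Kick a runqueue off on the hardware. The register values written here
 * are vendor magic numbers; they appear to put the engine into cmdlist
 * mode (RGA_SYS_CTRL), unmask the completion/error interrupts (RGA_INT),
 * and start fetching cmdlist_cnt command buffers (RGA_CMD_CTRL).
 */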
static void rga_cmd_start(struct rockchip_rga *rga,
                          struct rga_runqueue_node *runqueue)
{
        int ret;

        ret = pm_runtime_get_sync(rga->dev);
        if (ret < 0)
                return;

        rga_write(rga, RGA_SYS_CTRL, 0x00);

        rga_write(rga, RGA_CMD_BASE, runqueue->cmdlist_pool);

        rga_write(rga, RGA_SYS_CTRL, 0x22);

        rga_write(rga, RGA_INT, 0x600);

        rga_write(rga, RGA_CMD_CTRL, ((runqueue->cmdlist_cnt - 1) << 3) | 0x1);
}

static void rga_free_runqueue_node(struct rockchip_rga *rga,
                                   struct rga_runqueue_node *runqueue)
{
        struct rga_cmdlist_node *node;

        if (!runqueue)
                return;

        if (runqueue->cmdlist_pool_virt && runqueue->cmdlist_pool)
                dma_free_attrs(rga->dev,
                               runqueue->cmdlist_cnt * RGA_CMDLIST_SIZE * 4,
                               runqueue->cmdlist_pool_virt,
                               runqueue->cmdlist_pool,
                               &runqueue->cmdlist_dma_attrs);

        mutex_lock(&rga->cmdlist_mutex);
        /*
         * The commands in run_cmdlist have completed, so unmap all GEM
         * objects in each command node to drop their references.
         */
        list_for_each_entry(node, &runqueue->run_cmdlist, list)
                rga_unmap_cmdlist_gem(rga, node);
        list_splice_tail_init(&runqueue->run_cmdlist, &rga->free_cmdlist);
        mutex_unlock(&rga->cmdlist_mutex);

        kmem_cache_free(rga->runqueue_slab, runqueue);
}

static struct rga_runqueue_node *rga_get_runqueue(struct rockchip_rga *rga)
{
        struct rga_runqueue_node *runqueue;

        if (list_empty(&rga->runqueue_list))
                return NULL;

        runqueue = list_first_entry(&rga->runqueue_list,
                                    struct rga_runqueue_node, list);
        list_del_init(&runqueue->list);

        return runqueue;
}

static void rga_exec_runqueue(struct rockchip_rga *rga)
{
        rga->runqueue_node = rga_get_runqueue(rga);
        if (rga->runqueue_node)
                rga_cmd_start(rga, rga->runqueue_node);
}

static struct rga_cmdlist_node *rga_get_cmdlist(struct rockchip_rga *rga)
{
        struct rga_cmdlist_node *node;
        struct device *dev = rga->dev;

        mutex_lock(&rga->cmdlist_mutex);
        if (list_empty(&rga->free_cmdlist)) {
                dev_err(dev, "there is no free cmdlist\n");
                mutex_unlock(&rga->cmdlist_mutex);
                return NULL;
        }

        node = list_first_entry(&rga->free_cmdlist,
                                struct rga_cmdlist_node, list);
        list_del_init(&node->list);
        mutex_unlock(&rga->cmdlist_mutex);

        return node;
}

static void rga_put_cmdlist(struct rockchip_rga *rga,
                            struct rga_cmdlist_node *node)
{
        mutex_lock(&rga->cmdlist_mutex);
        list_move_tail(&node->list, &rga->free_cmdlist);
        mutex_unlock(&rga->cmdlist_mutex);
}

static void rga_add_cmdlist_to_inuse(struct rockchip_drm_rga_private *rga_priv,
                                     struct rga_cmdlist_node *node)
{
        /* New cmdlists simply queue at the tail of the in-use list. */
        list_add_tail(&node->list, &rga_priv->inuse_cmdlist);
}

/*
 * IOCTL function for userspace to get the RGA version.
 */
int rockchip_rga_get_ver_ioctl(struct drm_device *drm_dev, void *data,
                               struct drm_file *file)
{
        struct rockchip_drm_file_private *file_priv = file->driver_priv;
        struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
        struct drm_rockchip_rga_get_ver *ver = data;
        struct rockchip_rga *rga;
        struct device *dev;

        if (!rga_priv)
                return -ENODEV;

        dev = rga_priv->dev;
        if (!dev)
                return -ENODEV;

        rga = dev_get_drvdata(dev);
        if (!rga)
                return -EFAULT;

        ver->major = rga->version.major;
        ver->minor = rga->version.minor;

        return 0;
}

/*
 * IOCTL function for userspace to submit an RGA command list.
 */
int rockchip_rga_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
                                   struct drm_file *file)
{
        struct rockchip_drm_file_private *file_priv = file->driver_priv;
        struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
        struct drm_rockchip_rga_set_cmdlist *req = data;
        struct rga_cmdlist_node *node;
        struct rga_cmdlist *cmdlist;
        struct rockchip_rga *rga;
        int ret;

        if (!rga_priv)
                return -ENODEV;

        if (!rga_priv->dev)
                return -ENODEV;

        rga = dev_get_drvdata(rga_priv->dev);
        if (!rga)
                return -EFAULT;

        if (req->cmd_nr > RGA_CMDLIST_SIZE ||
            req->cmd_buf_nr > RGA_CMDBUF_SIZE) {
                dev_err(rga->dev, "cmdlist size is too big\n");
                return -EINVAL;
        }

        node = rga_get_cmdlist(rga);
        if (!node)
                return -ENOMEM;

        cmdlist = &node->cmdlist;
        cmdlist->last = 0;

        /*
         * Copy the command / buffer register settings from userspace. Each
         * command is a pair of integers: a register offset and the value
         * to write there.
         */
        if (copy_from_user(cmdlist->data, rga_compat_ptr(req->cmd),
                           sizeof(struct drm_rockchip_rga_cmd) * req->cmd_nr)) {
                ret = -EFAULT;
                goto err_free_cmdlist;
        }
        cmdlist->last += req->cmd_nr * 2;

        if (copy_from_user(&cmdlist->data[cmdlist->last],
                           rga_compat_ptr(req->cmd_buf),
                           sizeof(struct drm_rockchip_rga_cmd) *
                           req->cmd_buf_nr)) {
                ret = -EFAULT;
                goto err_free_cmdlist;
        }
        cmdlist->last += req->cmd_buf_nr * 2;

        /*
         * Check the userspace command registers, then map the
         * framebuffers: build the RGA MMU page tables or take the
         * framebuffer DMA addresses directly.
         */
        ret = rga_check_reg_offset(rga->dev, node);
        if (ret < 0) {
                dev_err(rga->dev, "Check reg offset failed\n");
                goto err_free_cmdlist;
        }

        ret = rga_map_cmdlist_gem(rga, node, drm_dev, file);
        if (ret < 0) {
                dev_err(rga->dev, "Failed to map cmdlist\n");
                goto err_unmap_cmdlist;
        }

        rga_add_cmdlist_to_inuse(rga_priv, node);

        return 0;

err_unmap_cmdlist:
        rga_unmap_cmdlist_gem(rga, node);
err_free_cmdlist:
        rga_put_cmdlist(rga, node);

        return ret;
}

/*
 * IOCTL function for userspace to start the RGA transform.
 */
int rockchip_rga_exec_ioctl(struct drm_device *drm_dev, void *data,
                            struct drm_file *file)
{
        struct rockchip_drm_file_private *file_priv = file->driver_priv;
        struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
        struct rga_runqueue_node *runqueue;
        struct rockchip_rga *rga;
        struct device *dev;
        int ret;

        if (!rga_priv)
                return -ENODEV;

        dev = rga_priv->dev;
        if (!dev)
                return -ENODEV;

        rga = dev_get_drvdata(dev);
        if (!rga)
                return -EFAULT;

        /* Zero the node so the error path below sees no stale pool fields. */
        runqueue = kmem_cache_zalloc(rga->runqueue_slab, GFP_KERNEL);
        if (!runqueue) {
                dev_err(rga->dev, "failed to allocate memory\n");
                return -ENOMEM;
        }

        runqueue->drm_dev = drm_dev;
        runqueue->dev = rga->dev;

        init_completion(&runqueue->complete);

        INIT_LIST_HEAD(&runqueue->run_cmdlist);

        list_splice_init(&rga_priv->inuse_cmdlist, &runqueue->run_cmdlist);

        if (list_empty(&runqueue->run_cmdlist)) {
                dev_err(rga->dev, "there is no inuse cmdlist\n");
                kmem_cache_free(rga->runqueue_slab, runqueue);
                return -EPERM;
        }

        ret = rga_alloc_dma_buf_for_cmdlist(runqueue);
        if (ret < 0) {
                dev_err(rga->dev, "cmdlist init failed\n");
                rga_free_runqueue_node(rga, runqueue);
                return ret;
        }

        mutex_lock(&rga->runqueue_mutex);
        runqueue->pid = current->pid;
        runqueue->file = file;
        list_add_tail(&runqueue->list, &rga->runqueue_list);
        if (!rga->runqueue_node)
                rga_exec_runqueue(rga);
        mutex_unlock(&rga->runqueue_mutex);

        wait_for_completion(&runqueue->complete);
        rga_free_runqueue_node(rga, runqueue);

        return 0;
}

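/*
 * A minimal sketch of the expected userspace flow, assuming the request
 * structs and DRM_IOCTL_ROCKCHIP_RGA_* numbers exposed through
 * drm/rockchip_drm.h:
 *
 *     struct drm_rockchip_rga_set_cmdlist req = {
 *             .cmd = (u64)(uintptr_t)cmds,      // offset/value pairs
 *             .cmd_nr = nr,
 *             .cmd_buf = (u64)(uintptr_t)bufs,  // base-addr pairs (GEM fds)
 *             .cmd_buf_nr = buf_nr,
 *     };
 *     ioctl(drm_fd, DRM_IOCTL_ROCKCHIP_RGA_SET_CMDLIST, &req);
 *     ioctl(drm_fd, DRM_IOCTL_ROCKCHIP_RGA_EXEC, &exec); // blocks until done
 */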
static int rockchip_rga_open(struct drm_device *drm_dev, struct device *dev,
                             struct drm_file *file)
{
        struct rockchip_drm_file_private *file_priv = file->driver_priv;
        struct rockchip_drm_rga_private *rga_priv;
        struct rockchip_rga *rga;

        rga = dev_get_drvdata(dev);
        rga->drm_dev = drm_dev;

        rga_priv = kzalloc(sizeof(*rga_priv), GFP_KERNEL);
        if (!rga_priv)
                return -ENOMEM;

        rga_priv->dev = dev;
        file_priv->rga_priv = rga_priv;

        INIT_LIST_HEAD(&rga_priv->inuse_cmdlist);

        return 0;
}

static void rockchip_rga_close(struct drm_device *drm_dev, struct device *dev,
                               struct drm_file *file)
{
        struct rockchip_drm_file_private *file_priv = file->driver_priv;
        struct rockchip_drm_rga_private *rga_priv = file_priv->rga_priv;
        struct rga_cmdlist_node *node, *n;
        struct rockchip_rga *rga;

        if (!dev)
                return;

        rga = dev_get_drvdata(dev);
        if (!rga)
                return;

        mutex_lock(&rga->cmdlist_mutex);
        list_for_each_entry_safe(node, n, &rga_priv->inuse_cmdlist, list) {
                /*
                 * Unmap all GEM objects that have not completed. If the
                 * current process was terminated forcibly, there may still
                 * be commands left in inuse_cmdlist, so unmap them here.
                 */
                rga_unmap_cmdlist_gem(rga, node);
                list_move_tail(&node->list, &rga->free_cmdlist);
        }
        mutex_unlock(&rga->cmdlist_mutex);

        kfree(file_priv->rga_priv);
}

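/*
 * Bottom half of the completion interrupt: drop the runtime PM reference
 * taken in rga_cmd_start(), wake the submitter, and start the next queued
 * runqueue unless we are suspending.
 */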
static void rga_runqueue_worker(struct work_struct *work)
{
        struct rockchip_rga *rga = container_of(work, struct rockchip_rga,
                                                runqueue_work);

        mutex_lock(&rga->runqueue_mutex);
        pm_runtime_put_sync(rga->dev);

        complete(&rga->runqueue_node->complete);

        if (rga->suspended)
                rga->runqueue_node = NULL;
        else
                rga_exec_runqueue(rga);

        mutex_unlock(&rga->runqueue_mutex);
}

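/*
 * Read the low four status bits of RGA_INT and write them back one nibble
 * up to acknowledge them (the clear bits sit at [7:4] in this block).
 * Bit 2 appears to be the "all commands done" flag, which hands the rest
 * of the work to the runqueue worker.
 */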
static irqreturn_t rga_irq_handler(int irq, void *dev_id)
{
        struct rockchip_rga *rga = dev_id;
        int intr;

        intr = rga_read(rga, RGA_INT) & 0xf;

        rga_mod(rga, RGA_INT, intr << 4, 0xf << 4);

        if (intr & 0x04)
                queue_work(rga->rga_workq, &rga->runqueue_work);

        return IRQ_HANDLED;
}

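/*
 * Pull the reset lines and clocks out of the device tree: pulse the core,
 * axi and ahb resets to put the block into a known state, then grab and
 * enable its three clocks.
 */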
static int rga_parse_dt(struct rockchip_rga *rga)
{
        struct reset_control *core_rst, *axi_rst, *ahb_rst;

        core_rst = devm_reset_control_get(rga->dev, "core");
        if (IS_ERR(core_rst)) {
                dev_err(rga->dev, "failed to get core reset controller\n");
                return PTR_ERR(core_rst);
        }

        axi_rst = devm_reset_control_get(rga->dev, "axi");
        if (IS_ERR(axi_rst)) {
                dev_err(rga->dev, "failed to get axi reset controller\n");
                return PTR_ERR(axi_rst);
        }

        ahb_rst = devm_reset_control_get(rga->dev, "ahb");
        if (IS_ERR(ahb_rst)) {
                dev_err(rga->dev, "failed to get ahb reset controller\n");
                return PTR_ERR(ahb_rst);
        }

        reset_control_assert(core_rst);
        udelay(1);
        reset_control_deassert(core_rst);

        reset_control_assert(axi_rst);
        udelay(1);
        reset_control_deassert(axi_rst);

        reset_control_assert(ahb_rst);
        udelay(1);
        reset_control_deassert(ahb_rst);

        rga->sclk = devm_clk_get(rga->dev, "sclk");
        if (IS_ERR(rga->sclk)) {
                dev_err(rga->dev, "failed to get sclk clock\n");
                return PTR_ERR(rga->sclk);
        }

        rga->aclk = devm_clk_get(rga->dev, "aclk");
        if (IS_ERR(rga->aclk)) {
                dev_err(rga->dev, "failed to get aclk clock\n");
                return PTR_ERR(rga->aclk);
        }

        rga->hclk = devm_clk_get(rga->dev, "hclk");
        if (IS_ERR(rga->hclk)) {
                dev_err(rga->dev, "failed to get hclk clock\n");
                return PTR_ERR(rga->hclk);
        }

        return rga_enable_clocks(rga);
}

static const struct of_device_id rockchip_rga_dt_ids[] = {
        { .compatible = "rockchip,rk3288-rga", },
        { .compatible = "rockchip,rk3228-rga", },
        { .compatible = "rockchip,rk3399-rga", },
        {},
};
MODULE_DEVICE_TABLE(of, rockchip_rga_dt_ids);

static int rga_probe(struct platform_device *pdev)
{
        struct drm_rockchip_subdrv *subdrv;
        struct rockchip_rga *rga;
        struct resource *iores;
        int irq;
        int ret;

        if (!pdev->dev.of_node)
                return -ENODEV;

        rga = devm_kzalloc(&pdev->dev, sizeof(*rga), GFP_KERNEL);
        if (!rga)
                return -ENOMEM;

        rga->dev = &pdev->dev;

        rga->runqueue_slab = kmem_cache_create("rga_runqueue_slab",
                                               sizeof(struct rga_runqueue_node),
                                               0, 0, NULL);
        if (!rga->runqueue_slab)
                return -ENOMEM;

        rga->rga_workq = create_singlethread_workqueue("rga");
        if (!rga->rga_workq) {
                dev_err(rga->dev, "failed to create workqueue\n");
                ret = -ENOMEM;
                goto err_destroy_slab;
        }

        INIT_WORK(&rga->runqueue_work, rga_runqueue_worker);
        INIT_LIST_HEAD(&rga->runqueue_list);
        mutex_init(&rga->runqueue_mutex);

        INIT_LIST_HEAD(&rga->free_cmdlist);
        mutex_init(&rga->cmdlist_mutex);

        rga_init_cmdlist(rga);

        ret = rga_parse_dt(rga);
        if (ret) {
                dev_err(rga->dev, "Unable to parse OF data\n");
                goto err_destroy_workqueue;
        }

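        /*
         * Note: the clocks enabled in rga_parse_dt() are left on here so
         * that the version register read below works; runtime PM takes
         * additional references around each command run.
         */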
        pm_runtime_enable(rga->dev);

        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        rga->regs = devm_ioremap_resource(rga->dev, iores);
        if (IS_ERR(rga->regs)) {
                ret = PTR_ERR(rga->regs);
                goto err_put_clk;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(rga->dev, "failed to get irq\n");
                ret = irq;
                goto err_put_clk;
        }

        ret = devm_request_irq(rga->dev, irq, rga_irq_handler, 0,
                               dev_name(rga->dev), rga);
        if (ret < 0) {
                dev_err(rga->dev, "failed to request irq\n");
                goto err_put_clk;
        }

        platform_set_drvdata(pdev, rga);

        rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
        rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;

        subdrv = &rga->subdrv;
        subdrv->dev = rga->dev;
        subdrv->open = rockchip_rga_open;
        subdrv->close = rockchip_rga_close;

        rockchip_drm_register_subdrv(subdrv);

        return 0;

err_put_clk:
        pm_runtime_disable(rga->dev);
        rga_disable_clocks(rga);
err_destroy_workqueue:
        destroy_workqueue(rga->rga_workq);
err_destroy_slab:
        kmem_cache_destroy(rga->runqueue_slab);

        return ret;
}

static int rga_remove(struct platform_device *pdev)
{
        struct rockchip_rga *rga = platform_get_drvdata(pdev);

        cancel_work_sync(&rga->runqueue_work);

        while (rga->runqueue_node) {
                rga_free_runqueue_node(rga, rga->runqueue_node);
                rga->runqueue_node = rga_get_runqueue(rga);
        }

        rockchip_drm_unregister_subdrv(&rga->subdrv);

        destroy_workqueue(rga->rga_workq);
        kmem_cache_destroy(rga->runqueue_slab);

        pm_runtime_disable(rga->dev);
        rga_disable_clocks(rga);

        return 0;
}

static int rga_suspend(struct device *dev)
{
        struct rockchip_rga *rga = dev_get_drvdata(dev);

        mutex_lock(&rga->runqueue_mutex);
        rga->suspended = true;
        mutex_unlock(&rga->runqueue_mutex);

        flush_work(&rga->runqueue_work);

        return 0;
}

static int rga_resume(struct device *dev)
{
        struct rockchip_rga *rga = dev_get_drvdata(dev);

        rga->suspended = false;
        rga_exec_runqueue(rga);

        return 0;
}

#ifdef CONFIG_PM
static int rga_runtime_suspend(struct device *dev)
{
        struct rockchip_rga *rga = dev_get_drvdata(dev);

        rga_disable_clocks(rga);

        return 0;
}

static int rga_runtime_resume(struct device *dev)
{
        struct rockchip_rga *rga = dev_get_drvdata(dev);

        return rga_enable_clocks(rga);
}
#endif

static const struct dev_pm_ops rga_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(rga_suspend, rga_resume)
        SET_RUNTIME_PM_OPS(rga_runtime_suspend,
                           rga_runtime_resume, NULL)
};

static struct platform_driver rga_pltfm_driver = {
        .probe  = rga_probe,
        .remove = rga_remove,
        .driver = {
                .name = "rockchip-rga",
                .pm = &rga_pm,
                .of_match_table = rockchip_rga_dt_ids,
        },
};

module_platform_driver(rga_pltfm_driver);

MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip RGA Driver Extension");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rockchip-rga");