Revert "PM QoS: Use spinlock in the per-device PM QoS constraints code"
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / drm / exynos / exynos_drm_g2d.c
1 /*
2  * Copyright (C) 2012 Samsung Electronics Co.Ltd
3  * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/clk.h>
13 #include <linux/err.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/slab.h>
19 #include <linux/workqueue.h>
20
21 #include "drmP.h"
22 #include "exynos_drm.h"
23 #include "exynos_drm_drv.h"
24 #include "exynos_drm_gem.h"
25
26 #define G2D_HW_MAJOR_VER                4
27 #define G2D_HW_MINOR_VER                1
28
/* valid register range set from user: 0x0104 ~ 0x0880 */
30 #define G2D_VALID_START                 0x0104
31 #define G2D_VALID_END                   0x0880
32
33 /* general registers */
34 #define G2D_SOFT_RESET                  0x0000
35 #define G2D_INTEN                       0x0004
36 #define G2D_INTC_PEND                   0x000C
37 #define G2D_DMA_SFR_BASE_ADDR           0x0080
38 #define G2D_DMA_COMMAND                 0x0084
39 #define G2D_DMA_STATUS                  0x008C
40 #define G2D_DMA_HOLD_CMD                0x0090
41
42 /* command registers */
43 #define G2D_BITBLT_START                0x0100
44
45 /* registers for base address */
46 #define G2D_SRC_BASE_ADDR               0x0304
47 #define G2D_SRC_PLANE2_BASE_ADDR        0x0318
48 #define G2D_DST_BASE_ADDR               0x0404
49 #define G2D_DST_PLANE2_BASE_ADDR        0x0418
50 #define G2D_PAT_BASE_ADDR               0x0500
51 #define G2D_MSK_BASE_ADDR               0x0520
52
53 /* G2D_SOFT_RESET */
54 #define G2D_SFRCLEAR                    (1 << 1)
55 #define G2D_R                           (1 << 0)
56
57 /* G2D_INTEN */
58 #define G2D_INTEN_ACF                   (1 << 3)
59 #define G2D_INTEN_UCF                   (1 << 2)
60 #define G2D_INTEN_GCF                   (1 << 1)
61 #define G2D_INTEN_SCF                   (1 << 0)
62
63 /* G2D_INTC_PEND */
64 #define G2D_INTP_ACMD_FIN               (1 << 3)
65 #define G2D_INTP_UCMD_FIN               (1 << 2)
66 #define G2D_INTP_GCMD_FIN               (1 << 1)
67 #define G2D_INTP_SCMD_FIN               (1 << 0)
68
69 /* G2D_DMA_COMMAND */
70 #define G2D_DMA_HALT                    (1 << 2)
71 #define G2D_DMA_CONTINUE                (1 << 1)
72 #define G2D_DMA_START                   (1 << 0)
73
74 /* G2D_DMA_STATUS */
75 #define G2D_DMA_LIST_DONE_COUNT         (0xFF << 17)
76 #define G2D_DMA_BITBLT_DONE_COUNT       (0xFFFF << 1)
77 #define G2D_DMA_DONE                    (1 << 0)
78 #define G2D_DMA_LIST_DONE_COUNT_OFFSET  17
79
80 /* G2D_DMA_HOLD_CMD */
81 #define G2D_USET_HOLD                   (1 << 2)
82 #define G2D_LIST_HOLD                   (1 << 1)
83 #define G2D_BITBLT_HOLD                 (1 << 0)
84
85 /* G2D_BITBLT_START */
86 #define G2D_START_CASESEL               (1 << 2)
87 #define G2D_START_NHOLT                 (1 << 1)
88 #define G2D_START_BITBLT                (1 << 0)
89
90 #define G2D_CMDLIST_SIZE                (PAGE_SIZE / 4)
91 #define G2D_CMDLIST_NUM                 64
92 #define G2D_CMDLIST_POOL_SIZE           (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
93 #define G2D_CMDLIST_DATA_NUM            (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
94
/*
 * cmdlist data structure
 *
 * One pool slot holding a hardware command list: (register, value) pairs
 * in data[], with data[last] reserved for the dma address of the next
 * chained cmdlist (written by g2d_add_cmdlist_to_inuse()).
 */
struct g2d_cmdlist {
	u32	head;	/* number of (reg, value) pairs: last / 2 */
	u32	data[G2D_CMDLIST_DATA_NUM];
	u32	last;	/* last data offset */
};
101
/* DRM event delivered to userspace when an event-carrying cmdlist finishes. */
struct drm_exynos_pending_g2d_event {
	struct drm_pending_event	base;
	struct drm_exynos_g2d_event	event;
};
106
/* Remembers one GEM handle whose dma address was patched into a cmdlist. */
struct g2d_gem_node {
	struct list_head	list;	/* entry in g2d_priv->gem_list */
	unsigned int		handle;
};
111
/* Bookkeeping for one cmdlist pool slot. */
struct g2d_cmdlist_node {
	struct list_head	list;		/* free / inuse / run list membership */
	struct g2d_cmdlist	*cmdlist;	/* CPU view of the pool slot */
	unsigned int		gem_nr;		/* GEM handles referenced by this list */
	dma_addr_t		dma_addr;	/* device view of the pool slot */

	struct drm_exynos_pending_g2d_event	*event;	/* NULL if no event requested */
};
120
/* One exec request: the cmdlists to run and the events to deliver. */
struct g2d_runqueue_node {
	struct list_head	list;		/* entry in g2d->runqueue */
	struct list_head	run_cmdlist;	/* cmdlists spliced from the file */
	struct list_head	event_list;	/* pending completion events */
	struct completion	complete;	/* signalled by the runqueue worker */
	int			async;		/* nonzero: don't wait, worker frees node */
};
128
/* Per-device driver state. */
struct g2d_data {
	struct device                   *dev;
	struct clk                      *gate_clk;	/* "fimg2d" gate clock */
	struct resource                 *regs_res;
	void __iomem                    *regs;		/* mapped MMIO registers */
	int                             irq;
	struct workqueue_struct         *g2d_workq;
	struct work_struct              runqueue_work;	/* runs g2d_runqueue_worker() */
	struct exynos_drm_subdrv        subdrv;
	bool                            suspended;	/* set under runqueue_mutex */

	/* cmdlist pool management */
	struct g2d_cmdlist_node         *cmdlist_node;	/* array of G2D_CMDLIST_NUM nodes */
	struct list_head                free_cmdlist;
	struct mutex                    cmdlist_mutex;	/* protects free_cmdlist */
	dma_addr_t                      cmdlist_pool;	/* DMA-coherent backing pool */
	void                            *cmdlist_pool_virt;

	/* runqueue of pending exec requests */
	struct g2d_runqueue_node        *runqueue_node;	/* currently executing node */
	struct list_head                runqueue;
	struct mutex                    runqueue_mutex;	/* protects runqueue state */
	struct kmem_cache               *runqueue_slab;
};
153
154 static int g2d_init_cmdlist(struct g2d_data *g2d)
155 {
156         struct device *dev = g2d->dev;
157         struct g2d_cmdlist_node *node = g2d->cmdlist_node;
158         int nr;
159         int ret;
160
161         g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
162                                                 &g2d->cmdlist_pool, GFP_KERNEL);
163         if (!g2d->cmdlist_pool_virt) {
164                 dev_err(dev, "failed to allocate dma memory\n");
165                 return -ENOMEM;
166         }
167
168         node = kcalloc(G2D_CMDLIST_NUM, G2D_CMDLIST_NUM * sizeof(*node),
169                         GFP_KERNEL);
170         if (!node) {
171                 dev_err(dev, "failed to allocate memory\n");
172                 ret = -ENOMEM;
173                 goto err;
174         }
175
176         for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
177                 node[nr].cmdlist =
178                         g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
179                 node[nr].dma_addr =
180                         g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
181
182                 list_add_tail(&node[nr].list, &g2d->free_cmdlist);
183         }
184
185         return 0;
186
187 err:
188         dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
189                         g2d->cmdlist_pool);
190         return ret;
191 }
192
193 static void g2d_fini_cmdlist(struct g2d_data *g2d)
194 {
195         struct device *dev = g2d->dev;
196
197         kfree(g2d->cmdlist_node);
198         dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
199                         g2d->cmdlist_pool);
200 }
201
202 static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
203 {
204         struct device *dev = g2d->dev;
205         struct g2d_cmdlist_node *node;
206
207         mutex_lock(&g2d->cmdlist_mutex);
208         if (list_empty(&g2d->free_cmdlist)) {
209                 dev_err(dev, "there is no free cmdlist\n");
210                 mutex_unlock(&g2d->cmdlist_mutex);
211                 return NULL;
212         }
213
214         node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
215                                 list);
216         list_del_init(&node->list);
217         mutex_unlock(&g2d->cmdlist_mutex);
218
219         return node;
220 }
221
222 static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
223 {
224         mutex_lock(&g2d->cmdlist_mutex);
225         list_move_tail(&node->list, &g2d->free_cmdlist);
226         mutex_unlock(&g2d->cmdlist_mutex);
227 }
228
229 static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
230                                      struct g2d_cmdlist_node *node)
231 {
232         struct g2d_cmdlist_node *lnode;
233
234         if (list_empty(&g2d_priv->inuse_cmdlist))
235                 goto add_to_list;
236
237         /* this links to base address of new cmdlist */
238         lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
239                                 struct g2d_cmdlist_node, list);
240         lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
241
242 add_to_list:
243         list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
244
245         if (node->event)
246                 list_add_tail(&node->event->base.link, &g2d_priv->event_list);
247 }
248
/*
 * Resolve each GEM handle referenced by the cmdlist to its dma address
 * and patch the handle slot in-place with that address.
 *
 * The last node->gem_nr (reg, value) pairs in the cmdlist are base-address
 * writes; the value word of pair i sits at cmdlist->last - (i * 2 + 1).
 * Each mapped handle is recorded on g2d_priv->gem_list so the reference
 * can be dropped later by g2d_put_cmdlist_gem().
 *
 * Returns 0 on success or a negative errno; on failure node->gem_nr is
 * trimmed to the count actually mapped so the caller can unwind exactly
 * those with g2d_put_cmdlist_gem().
 */
static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
			       struct drm_file *file,
			       struct g2d_cmdlist_node *node)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
	struct g2d_cmdlist *cmdlist = node->cmdlist;
	dma_addr_t *addr;
	int offset;
	int i;

	for (i = 0; i < node->gem_nr; i++) {
		struct g2d_gem_node *gem_node;

		gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
		if (!gem_node) {
			dev_err(g2d_priv->dev, "failed to allocate gem node\n");
			return -ENOMEM;
		}

		/* value word of the i-th base-address pair, from the end */
		offset = cmdlist->last - (i * 2 + 1);
		gem_node->handle = cmdlist->data[offset];

		addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
						   file);
		if (IS_ERR(addr)) {
			/* only i handles were mapped; trim for the unwinder */
			node->gem_nr = i;
			kfree(gem_node);
			return PTR_ERR(addr);
		}

		/* replace the userspace handle with the device address */
		cmdlist->data[offset] = *addr;
		list_add_tail(&gem_node->list, &g2d_priv->gem_list);
		g2d_priv->gem_nr++;
	}

	return 0;
}
287
288 static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
289                                 struct drm_file *file,
290                                 unsigned int nr)
291 {
292         struct drm_exynos_file_private *file_priv = file->driver_priv;
293         struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
294         struct g2d_gem_node *node, *n;
295
296         list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
297                 if (!nr)
298                         break;
299
300                 exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
301                 list_del_init(&node->list);
302                 kfree(node);
303                 nr--;
304         }
305 }
306
/*
 * Power up the engine and kick off DMA execution of the first cmdlist in
 * the runqueue node. Completion interrupts are enabled before starting;
 * the matching clk_disable()/pm_runtime_put_sync() happen in
 * g2d_runqueue_worker() once the whole command set finishes.
 */
static void g2d_dma_start(struct g2d_data *g2d,
			  struct g2d_runqueue_node *runqueue_node)
{
	struct g2d_cmdlist_node *node =
				list_first_entry(&runqueue_node->run_cmdlist,
						struct g2d_cmdlist_node, list);

	pm_runtime_get_sync(g2d->dev);
	clk_enable(g2d->gate_clk);

	/* interrupt enable */
	writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
			g2d->regs + G2D_INTEN);

	/* point the DMA engine at the first cmdlist, then start it */
	writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
	writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
}
324
325 static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
326 {
327         struct g2d_runqueue_node *runqueue_node;
328
329         if (list_empty(&g2d->runqueue))
330                 return NULL;
331
332         runqueue_node = list_first_entry(&g2d->runqueue,
333                                          struct g2d_runqueue_node, list);
334         list_del_init(&runqueue_node->list);
335         return runqueue_node;
336 }
337
338 static void g2d_free_runqueue_node(struct g2d_data *g2d,
339                                    struct g2d_runqueue_node *runqueue_node)
340 {
341         if (!runqueue_node)
342                 return;
343
344         mutex_lock(&g2d->cmdlist_mutex);
345         list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
346         mutex_unlock(&g2d->cmdlist_mutex);
347
348         kmem_cache_free(g2d->runqueue_slab, runqueue_node);
349 }
350
351 static void g2d_exec_runqueue(struct g2d_data *g2d)
352 {
353         g2d->runqueue_node = g2d_get_runqueue_node(g2d);
354         if (g2d->runqueue_node)
355                 g2d_dma_start(g2d, g2d->runqueue_node);
356 }
357
/*
 * Workqueue handler queued by the IRQ handler once the DMA engine reports
 * all commands finished: powers the block down, completes the current
 * runqueue node (freeing it here if the submitter ran async, since nobody
 * will wait on it) and dispatches the next node unless suspending.
 */
static void g2d_runqueue_worker(struct work_struct *work)
{
	struct g2d_data *g2d = container_of(work, struct g2d_data,
					    runqueue_work);


	mutex_lock(&g2d->runqueue_mutex);
	clk_disable(g2d->gate_clk);
	pm_runtime_put_sync(g2d->dev);

	complete(&g2d->runqueue_node->complete);
	if (g2d->runqueue_node->async)
		g2d_free_runqueue_node(g2d, g2d->runqueue_node);

	if (g2d->suspended)
		g2d->runqueue_node = NULL;	/* lets g2d_suspend() proceed */
	else
		g2d_exec_runqueue(g2d);
	mutex_unlock(&g2d->runqueue_mutex);
}
378
/*
 * Deliver the oldest pending event of the current runqueue node to its
 * file: stamp it with the current time and the finished cmdlist index,
 * then move it onto the file's event queue and wake any poller.
 * No-op when the node has no events pending.
 */
static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
{
	struct drm_device *drm_dev = g2d->subdrv.drm_dev;
	struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
	struct drm_exynos_pending_g2d_event *e;
	struct timeval now;
	unsigned long flags;

	if (list_empty(&runqueue_node->event_list))
		return;

	e = list_first_entry(&runqueue_node->event_list,
			     struct drm_exynos_pending_g2d_event, base.link);

	do_gettimeofday(&now);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.cmdlist_no = cmdlist_no;

	/* event_lock protects the per-file event queue against readers */
	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
403
/*
 * IRQ handler: acknowledge the pending interrupt bits, deliver a
 * userspace event for a finished held ("list done") cmdlist and resume
 * DMA, and schedule the runqueue worker once the whole chained command
 * set has completed.
 */
static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
{
	struct g2d_data *g2d = dev_id;
	u32 pending;

	pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
	if (pending)
		writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);	/* ack */

	if (pending & G2D_INTP_GCMD_FIN) {
		u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);

		/* extract the index of the cmdlist that just finished */
		cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
						G2D_DMA_LIST_DONE_COUNT_OFFSET;

		g2d_finish_event(g2d, cmdlist_no);

		/* release the hold; continue DMA unless everything is done */
		writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
		if (!(pending & G2D_INTP_ACMD_FIN)) {
			writel_relaxed(G2D_DMA_CONTINUE,
					g2d->regs + G2D_DMA_COMMAND);
		}
	}

	if (pending & G2D_INTP_ACMD_FIN)
		queue_work(g2d->g2d_workq, &g2d->runqueue_work);

	return IRQ_HANDLED;
}
433
434 static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
435                                 int nr, bool for_addr)
436 {
437         int reg_offset;
438         int index;
439         int i;
440
441         for (i = 0; i < nr; i++) {
442                 index = cmdlist->last - 2 * (i + 1);
443                 reg_offset = cmdlist->data[index] & ~0xfffff000;
444
445                 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
446                         goto err;
447                 if (reg_offset % 4)
448                         goto err;
449
450                 switch (reg_offset) {
451                 case G2D_SRC_BASE_ADDR:
452                 case G2D_SRC_PLANE2_BASE_ADDR:
453                 case G2D_DST_BASE_ADDR:
454                 case G2D_DST_PLANE2_BASE_ADDR:
455                 case G2D_PAT_BASE_ADDR:
456                 case G2D_MSK_BASE_ADDR:
457                         if (!for_addr)
458                                 goto err;
459                         break;
460                 default:
461                         if (for_addr)
462                                 goto err;
463                         break;
464                 }
465         }
466
467         return 0;
468
469 err:
470         dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
471         return -EINVAL;
472 }
473
474 /* ioctl functions */
/*
 * DRM_EXYNOS_G2D_GET_VER ioctl: report the G2D hardware revision this
 * driver targets. Always succeeds.
 */
int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
			     struct drm_file *file)
{
	struct drm_exynos_g2d_get_ver *ver = data;

	ver->major = G2D_HW_MAJOR_VER;
	ver->minor = G2D_HW_MINOR_VER;

	return 0;
}
EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
486
487 int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
488                                  struct drm_file *file)
489 {
490         struct drm_exynos_file_private *file_priv = file->driver_priv;
491         struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
492         struct device *dev = g2d_priv->dev;
493         struct g2d_data *g2d;
494         struct drm_exynos_g2d_set_cmdlist *req = data;
495         struct drm_exynos_g2d_cmd *cmd;
496         struct drm_exynos_pending_g2d_event *e;
497         struct g2d_cmdlist_node *node;
498         struct g2d_cmdlist *cmdlist;
499         unsigned long flags;
500         int size;
501         int ret;
502
503         if (!dev)
504                 return -ENODEV;
505
506         g2d = dev_get_drvdata(dev);
507         if (!g2d)
508                 return -EFAULT;
509
510         node = g2d_get_cmdlist(g2d);
511         if (!node)
512                 return -ENOMEM;
513
514         node->event = NULL;
515
516         if (req->event_type != G2D_EVENT_NOT) {
517                 spin_lock_irqsave(&drm_dev->event_lock, flags);
518                 if (file->event_space < sizeof(e->event)) {
519                         spin_unlock_irqrestore(&drm_dev->event_lock, flags);
520                         ret = -ENOMEM;
521                         goto err;
522                 }
523                 file->event_space -= sizeof(e->event);
524                 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
525
526                 e = kzalloc(sizeof(*node->event), GFP_KERNEL);
527                 if (!e) {
528                         dev_err(dev, "failed to allocate event\n");
529
530                         spin_lock_irqsave(&drm_dev->event_lock, flags);
531                         file->event_space += sizeof(e->event);
532                         spin_unlock_irqrestore(&drm_dev->event_lock, flags);
533
534                         ret = -ENOMEM;
535                         goto err;
536                 }
537
538                 e->event.base.type = DRM_EXYNOS_G2D_EVENT;
539                 e->event.base.length = sizeof(e->event);
540                 e->event.user_data = req->user_data;
541                 e->base.event = &e->event.base;
542                 e->base.file_priv = file;
543                 e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
544
545                 node->event = e;
546         }
547
548         cmdlist = node->cmdlist;
549
550         cmdlist->last = 0;
551
552         /*
553          * If don't clear SFR registers, the cmdlist is affected by register
554          * values of previous cmdlist. G2D hw executes SFR clear command and
555          * a next command at the same time then the next command is ignored and
556          * is executed rightly from next next command, so needs a dummy command
557          * to next command of SFR clear command.
558          */
559         cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
560         cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
561         cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
562         cmdlist->data[cmdlist->last++] = 0;
563
564         if (node->event) {
565                 cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
566                 cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
567         }
568
569         /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
570         size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
571         if (size > G2D_CMDLIST_DATA_NUM) {
572                 dev_err(dev, "cmdlist size is too big\n");
573                 ret = -EINVAL;
574                 goto err_free_event;
575         }
576
577         cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd;
578
579         if (copy_from_user(cmdlist->data + cmdlist->last,
580                                 (void __user *)cmd,
581                                 sizeof(*cmd) * req->cmd_nr)) {
582                 ret = -EFAULT;
583                 goto err_free_event;
584         }
585         cmdlist->last += req->cmd_nr * 2;
586
587         ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
588         if (ret < 0)
589                 goto err_free_event;
590
591         node->gem_nr = req->cmd_gem_nr;
592         if (req->cmd_gem_nr) {
593                 struct drm_exynos_g2d_cmd *cmd_gem;
594
595                 cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
596
597                 if (copy_from_user(cmdlist->data + cmdlist->last,
598                                         (void __user *)cmd_gem,
599                                         sizeof(*cmd_gem) * req->cmd_gem_nr)) {
600                         ret = -EFAULT;
601                         goto err_free_event;
602                 }
603                 cmdlist->last += req->cmd_gem_nr * 2;
604
605                 ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
606                 if (ret < 0)
607                         goto err_free_event;
608
609                 ret = g2d_get_cmdlist_gem(drm_dev, file, node);
610                 if (ret < 0)
611                         goto err_unmap;
612         }
613
614         cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
615         cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;
616
617         /* head */
618         cmdlist->head = cmdlist->last / 2;
619
620         /* tail */
621         cmdlist->data[cmdlist->last] = 0;
622
623         g2d_add_cmdlist_to_inuse(g2d_priv, node);
624
625         return 0;
626
627 err_unmap:
628         g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
629 err_free_event:
630         if (node->event) {
631                 spin_lock_irqsave(&drm_dev->event_lock, flags);
632                 file->event_space += sizeof(e->event);
633                 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
634                 kfree(node->event);
635         }
636 err:
637         g2d_put_cmdlist(g2d, node);
638         return ret;
639 }
640 EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
641
/*
 * DRM_EXYNOS_G2D_EXEC ioctl: package the file's in-use cmdlists and
 * pending events into a runqueue node and queue it for execution. With
 * req->async set the call returns immediately and the runqueue worker
 * frees the node; otherwise it blocks until the node completes.
 *
 * Returns 0, or a negative errno (-EPERM when no cmdlist was set).
 */
int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
			  struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
	struct device *dev = g2d_priv->dev;
	struct g2d_data *g2d;
	struct drm_exynos_g2d_exec *req = data;
	struct g2d_runqueue_node *runqueue_node;
	struct list_head *run_cmdlist;
	struct list_head *event_list;

	if (!dev)
		return -ENODEV;

	g2d = dev_get_drvdata(dev);
	if (!g2d)
		return -EFAULT;

	runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
	if (!runqueue_node) {
		dev_err(dev, "failed to allocate memory\n");
		return -ENOMEM;
	}
	run_cmdlist = &runqueue_node->run_cmdlist;
	event_list = &runqueue_node->event_list;
	INIT_LIST_HEAD(run_cmdlist);
	INIT_LIST_HEAD(event_list);
	init_completion(&runqueue_node->complete);
	runqueue_node->async = req->async;

	/* hand the file's accumulated work over to this node */
	list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
	list_splice_init(&g2d_priv->event_list, event_list);

	if (list_empty(run_cmdlist)) {
		dev_err(dev, "there is no inuse cmdlist\n");
		kmem_cache_free(g2d->runqueue_slab, runqueue_node);
		return -EPERM;
	}

	/* queue the node; start it now if the engine is idle */
	mutex_lock(&g2d->runqueue_mutex);
	list_add_tail(&runqueue_node->list, &g2d->runqueue);
	if (!g2d->runqueue_node)
		g2d_exec_runqueue(g2d);
	mutex_unlock(&g2d->runqueue_mutex);

	if (runqueue_node->async)
		goto out;

	wait_for_completion(&runqueue_node->complete);
	g2d_free_runqueue_node(g2d, runqueue_node);

out:
	return 0;
}
EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
698
699 static int g2d_open(struct drm_device *drm_dev, struct device *dev,
700                         struct drm_file *file)
701 {
702         struct drm_exynos_file_private *file_priv = file->driver_priv;
703         struct exynos_drm_g2d_private *g2d_priv;
704
705         g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
706         if (!g2d_priv) {
707                 dev_err(dev, "failed to allocate g2d private data\n");
708                 return -ENOMEM;
709         }
710
711         g2d_priv->dev = dev;
712         file_priv->g2d_priv = g2d_priv;
713
714         INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
715         INIT_LIST_HEAD(&g2d_priv->event_list);
716         INIT_LIST_HEAD(&g2d_priv->gem_list);
717
718         return 0;
719 }
720
721 static void g2d_close(struct drm_device *drm_dev, struct device *dev,
722                         struct drm_file *file)
723 {
724         struct drm_exynos_file_private *file_priv = file->driver_priv;
725         struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
726         struct g2d_data *g2d;
727         struct g2d_cmdlist_node *node, *n;
728
729         if (!dev)
730                 return;
731
732         g2d = dev_get_drvdata(dev);
733         if (!g2d)
734                 return;
735
736         mutex_lock(&g2d->cmdlist_mutex);
737         list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
738                 list_move_tail(&node->list, &g2d->free_cmdlist);
739         mutex_unlock(&g2d->cmdlist_mutex);
740
741         g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
742
743         kfree(file_priv->g2d_priv);
744 }
745
/*
 * Probe the s5p-g2d platform device: allocate driver state, the runqueue
 * slab cache, the worker queue and the cmdlist pool, acquire the clock,
 * MMIO region and IRQ, then register as an exynos_drm sub-driver. Every
 * error path unwinds in strict reverse order of acquisition.
 */
static int __devinit g2d_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct g2d_data *g2d;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	g2d = kzalloc(sizeof(*g2d), GFP_KERNEL);
	if (!g2d) {
		dev_err(dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	/* slab cache for runqueue nodes allocated per exec ioctl */
	g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
			sizeof(struct g2d_runqueue_node), 0, 0, NULL);
	if (!g2d->runqueue_slab) {
		ret = -ENOMEM;
		goto err_free_mem;
	}

	g2d->dev = dev;

	g2d->g2d_workq = create_singlethread_workqueue("g2d");
	if (!g2d->g2d_workq) {
		dev_err(dev, "failed to create workqueue\n");
		ret = -EINVAL;
		goto err_destroy_slab;
	}

	INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
	INIT_LIST_HEAD(&g2d->free_cmdlist);
	INIT_LIST_HEAD(&g2d->runqueue);

	mutex_init(&g2d->cmdlist_mutex);
	mutex_init(&g2d->runqueue_mutex);

	/* DMA-coherent cmdlist pool and its node array */
	ret = g2d_init_cmdlist(g2d);
	if (ret < 0)
		goto err_destroy_workqueue;

	g2d->gate_clk = clk_get(dev, "fimg2d");
	if (IS_ERR(g2d->gate_clk)) {
		dev_err(dev, "failed to get gate clock\n");
		ret = PTR_ERR(g2d->gate_clk);
		goto err_fini_cmdlist;
	}

	pm_runtime_enable(dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "failed to get I/O memory\n");
		ret = -ENOENT;
		goto err_put_clk;
	}

	g2d->regs_res = request_mem_region(res->start, resource_size(res),
					   dev_name(dev));
	if (!g2d->regs_res) {
		dev_err(dev, "failed to request I/O memory\n");
		ret = -ENOENT;
		goto err_put_clk;
	}

	g2d->regs = ioremap(res->start, resource_size(res));
	if (!g2d->regs) {
		dev_err(dev, "failed to remap I/O memory\n");
		ret = -ENXIO;
		goto err_release_res;
	}

	g2d->irq = platform_get_irq(pdev, 0);
	if (g2d->irq < 0) {
		dev_err(dev, "failed to get irq\n");
		ret = g2d->irq;
		goto err_unmap_base;
	}

	ret = request_irq(g2d->irq, g2d_irq_handler, 0, "drm_g2d", g2d);
	if (ret < 0) {
		dev_err(dev, "irq request failed\n");
		goto err_unmap_base;
	}

	platform_set_drvdata(pdev, g2d);

	/* expose open/close hooks through the exynos_drm sub-driver core */
	subdrv = &g2d->subdrv;
	subdrv->dev = dev;
	subdrv->open = g2d_open;
	subdrv->close = g2d_close;

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		dev_err(dev, "failed to register drm g2d device\n");
		goto err_free_irq;
	}

	dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
			G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);

	return 0;

err_free_irq:
	free_irq(g2d->irq, g2d);
err_unmap_base:
	iounmap(g2d->regs);
err_release_res:
	release_resource(g2d->regs_res);
	kfree(g2d->regs_res);
err_put_clk:
	pm_runtime_disable(dev);
	clk_put(g2d->gate_clk);
err_fini_cmdlist:
	g2d_fini_cmdlist(g2d);
err_destroy_workqueue:
	destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
	kmem_cache_destroy(g2d->runqueue_slab);
err_free_mem:
	kfree(g2d);
	return ret;
}
869
/*
 * Remove hook: stop the worker and unregister before releasing the IRQ,
 * drain any runqueue nodes that never executed, then tear down resources
 * in reverse order of g2d_probe().
 */
static int __devexit g2d_remove(struct platform_device *pdev)
{
	struct g2d_data *g2d = platform_get_drvdata(pdev);

	cancel_work_sync(&g2d->runqueue_work);
	exynos_drm_subdrv_unregister(&g2d->subdrv);
	free_irq(g2d->irq, g2d);

	/* free the current node and everything still queued behind it */
	while (g2d->runqueue_node) {
		g2d_free_runqueue_node(g2d, g2d->runqueue_node);
		g2d->runqueue_node = g2d_get_runqueue_node(g2d);
	}

	iounmap(g2d->regs);
	release_resource(g2d->regs_res);
	kfree(g2d->regs_res);

	pm_runtime_disable(&pdev->dev);
	clk_put(g2d->gate_clk);

	g2d_fini_cmdlist(g2d);
	destroy_workqueue(g2d->g2d_workq);
	kmem_cache_destroy(g2d->runqueue_slab);
	kfree(g2d);

	return 0;
}
897
898 #ifdef CONFIG_PM_SLEEP
/*
 * System-sleep suspend: block new runqueue dispatch, wait for the
 * in-flight cmdlist to finish (the worker clears runqueue_node when
 * suspended is set), then flush the worker itself.
 */
static int g2d_suspend(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);

	/* taken under runqueue_mutex so the worker sees a consistent flag */
	mutex_lock(&g2d->runqueue_mutex);
	g2d->suspended = true;
	mutex_unlock(&g2d->runqueue_mutex);

	while (g2d->runqueue_node)
		/* FIXME: good range? */
		usleep_range(500, 1000);

	flush_work_sync(&g2d->runqueue_work);

	return 0;
}
915
916 static int g2d_resume(struct device *dev)
917 {
918         struct g2d_data *g2d = dev_get_drvdata(dev);
919
920         g2d->suspended = false;
921         g2d_exec_runqueue(g2d);
922
923         return 0;
924 }
925 #endif
926
927 SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
928
929 struct platform_driver g2d_driver = {
930         .probe          = g2d_probe,
931         .remove         = __devexit_p(g2d_remove),
932         .driver         = {
933                 .name   = "s5p-g2d",
934                 .owner  = THIS_MODULE,
935                 .pm     = &g2d_pm_ops,
936         },
937 };