OMAPDSS: APPLY: configure_* funcs take ovl/manager as args
[firefly-linux-kernel-4.4.55.git] / drivers / video / omap2 / dss / apply.c
1 /*
2  * Copyright (C) 2011 Texas Instruments
3  * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17
18 #define DSS_SUBSYS_NAME "APPLY"
19
20 #include <linux/kernel.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/jiffies.h>
24
25 #include <video/omapdss.h>
26
27 #include "dss.h"
28 #include "dss_features.h"
29
30 /*
31  * We have 4 levels of cache for the dispc settings. First two are in SW and
32  * the latter two in HW.
33  *
34  * +--------------------+
35  * |overlay/manager_info|
36  * +--------------------+
37  *          v
38  *        apply()
39  *          v
40  * +--------------------+
41  * |     dss_cache      |
42  * +--------------------+
43  *          v
44  *      write_regs()
45  *          v
46  * +--------------------+
47  * |  shadow registers  |
48  * +--------------------+
49  *          v
50  * VFP or lcd/digit_enable
51  *          v
52  * +--------------------+
53  * |      registers     |
54  * +--------------------+
55  */
56
/*
 * Per-overlay entry of the second cache level ("dss_cache" in the diagram
 * above). Filled by omap_dss_mgr_apply_ovl()/..._ovl_fifos(), flushed to the
 * shadow registers by dss_ovl_write_regs().
 */
struct overlay_cache_data {
	/* If true, cache changed, but not written to shadow registers. Set
	 * in apply(), cleared when registers written. */
	bool dirty;
	/* If true, shadow registers contain changed values not yet in real
	 * registers. Set when writing to shadow registers, cleared at
	 * VSYNC/EVSYNC */
	bool shadow_dirty;

	/* Cached "overlay is usable" state: ovl->info.enabled combined with
	 * manager/device presence (see overlay_enabled()) */
	bool enabled;

	/* Snapshot of the user-visible overlay configuration */
	struct omap_overlay_info info;

	/* DISPC channel (manager) this overlay is routed to */
	enum omap_channel channel;

	/* FIFO thresholds computed in omap_dss_mgr_apply_ovl_fifos() */
	u32 fifo_low;
	u32 fifo_high;
};
75
/*
 * Per-manager entry of the second cache level. Filled by
 * omap_dss_mgr_apply_mgr(), flushed by dss_mgr_write_regs().
 */
struct manager_cache_data {
	/* If true, cache changed, but not written to shadow registers. Set
	 * in apply(), cleared when registers written. */
	bool dirty;
	/* If true, shadow registers contain changed values not yet in real
	 * registers. Set when writing to shadow registers, cleared at
	 * VSYNC/EVSYNC */
	bool shadow_dirty;

	/* Snapshot of the user-visible manager configuration */
	struct omap_overlay_manager_info info;

	/* True when the attached display is a manual update display */
	bool manual_update;
	/* Set temporarily by dss_mgr_start_update() to let dss_write_regs()
	 * commit a manual update manager's settings */
	bool do_manual_update;
};
90
/*
 * The SW-side settings cache ("dss_cache" in the diagram above). All fields
 * are protected by 'lock', which is shared between apply() callers and
 * dss_apply_irq_handler().
 */
static struct {
	spinlock_t lock;
	struct overlay_cache_data overlay_cache[MAX_DSS_OVERLAYS];
	struct manager_cache_data manager_cache[MAX_DSS_MANAGERS];

	/* true while dss_apply_irq_handler() is registered for vsync irqs */
	bool irq_enabled;
} dss_cache;
98
/* One-time initialization of the apply machinery (the dss_cache lock) */
void dss_apply_init(void)
{
	spin_lock_init(&dss_cache.lock);
}
103
104 static bool ovl_manual_update(struct omap_overlay *ovl)
105 {
106         return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
107 }
108
109 static bool mgr_manual_update(struct omap_overlay_manager *mgr)
110 {
111         return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
112 }
113
114 static int overlay_enabled(struct omap_overlay *ovl)
115 {
116         return ovl->info.enabled && ovl->manager && ovl->manager->device;
117 }
118
/*
 * Wait until the manager's cached settings have reached the hardware, i.e.
 * until both the SW-cache dirty flag and the shadow-register dirty flag are
 * clear.
 *
 * Returns 0 when there is nothing to wait for (inactive display, manual
 * update display, or flags already clear), -ERESTARTSYS if the wait was
 * interrupted, or the error from the vsync irq wait. If the flags never
 * clear we log an error and give up with 0.
 */
int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct manager_cache_data *mc;
	u32 irq;
	int r;
	int i;
	struct omap_dss_device *dssdev = mgr->device;

	if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
		return 0;

	/* Manual update displays take new settings at the next update; there
	 * is no GO/vsync sequence to wait for. */
	if (mgr_manual_update(mgr))
		return 0;

	irq = dispc_mgr_get_vsync_irq(mgr->id);

	mc = &dss_cache.manager_cache[mgr->id];
	i = 0;
	while (1) {
		unsigned long flags;
		bool shadow_dirty, dirty;

		/* sample both flags consistently wrt. apply()/irq handler */
		spin_lock_irqsave(&dss_cache.lock, flags);
		dirty = mc->dirty;
		shadow_dirty = mc->shadow_dirty;
		spin_unlock_irqrestore(&dss_cache.lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("mgr(%d)->wait_for_go() not finishing\n",
					mgr->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
			break;
		}
	}

	return r;
}
176
/*
 * Same as dss_mgr_wait_for_go(), but waits on a single overlay's dirty and
 * shadow_dirty flags instead of its manager's. Returns 0 immediately if the
 * overlay has no manager, the display is inactive, or the display is a
 * manual update display.
 */
int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct overlay_cache_data *oc;
	struct omap_dss_device *dssdev;
	u32 irq;
	int r;
	int i;

	if (!ovl->manager)
		return 0;

	dssdev = ovl->manager->device;

	if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
		return 0;

	/* No GO/vsync sequence on manual update displays */
	if (ovl_manual_update(ovl))
		return 0;

	irq = dispc_mgr_get_vsync_irq(ovl->manager->id);

	oc = &dss_cache.overlay_cache[ovl->id];
	i = 0;
	while (1) {
		unsigned long flags;
		bool shadow_dirty, dirty;

		/* sample both flags consistently wrt. apply()/irq handler */
		spin_lock_irqsave(&dss_cache.lock, flags);
		dirty = oc->dirty;
		shadow_dirty = oc->shadow_dirty;
		spin_unlock_irqrestore(&dss_cache.lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("ovl(%d)->wait_for_go() not finishing\n",
					ovl->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
			break;
		}
	}

	return r;
}
239
/*
 * Flush one overlay's cached settings to the (shadow) registers. If the
 * cached state says the overlay is disabled, only the enable bit is cleared.
 * Returns 0 on success or the error from dispc_ovl_setup(), in which case
 * the overlay is left disabled.
 */
static int dss_ovl_write_regs(struct omap_overlay *ovl)
{
	struct overlay_cache_data *c;
	struct omap_overlay_info *oi;
	bool ilace, replication;
	int r;

	DSSDBGF("%d", ovl->id);

	c = &dss_cache.overlay_cache[ovl->id];
	oi = &c->info;

	if (!c->enabled) {
		dispc_ovl_enable(ovl->id, 0);
		return 0;
	}

	replication = dss_use_replication(ovl->manager->device, oi->color_mode);

	/* interlaced output is only used for the TV (VENC) path */
	ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;

	dispc_ovl_set_channel_out(ovl->id, c->channel);

	r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
	if (r) {
		/* this shouldn't happen */
		DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);
		dispc_ovl_enable(ovl->id, 0);
		return r;
	}

	dispc_ovl_set_fifo_threshold(ovl->id, c->fifo_low, c->fifo_high);

	dispc_ovl_enable(ovl->id, 1);

	return 0;
}
277
278 static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
279 {
280         struct omap_overlay_manager_info *mi;
281
282         DSSDBGF("%d", mgr->id);
283
284         mi = &dss_cache.manager_cache[mgr->id].info;
285
286         dispc_mgr_setup(mgr->id, mi);
287 }
288
/* dss_write_regs() tries to write values from cache to shadow registers.
 * It writes only to those managers/overlays that are not busy.
 * returns 0 if everything could be written to shadow registers.
 * returns 1 if not everything could be written to shadow registers.
 *
 * Must be called with dss_cache.lock held. Works in three passes: commit
 * dirty overlays, commit dirty managers, then set GO on every manager that
 * received new shadow values. */
static int dss_write_regs(void)
{
	struct omap_overlay *ovl;
	struct omap_overlay_manager *mgr;
	struct overlay_cache_data *oc;
	struct manager_cache_data *mc;
	const int num_ovls = dss_feat_get_num_ovls();
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;
	int r;
	bool mgr_busy[MAX_DSS_MANAGERS];
	bool mgr_go[MAX_DSS_MANAGERS];
	bool busy;

	r = 0;
	busy = false;

	/* sample GO-busy state once; a busy manager's shadow registers must
	 * not be touched until the HW has taken them into use */
	for (i = 0; i < num_mgrs; i++) {
		mgr_busy[i] = dispc_mgr_go_busy(i);
		mgr_go[i] = false;
	}

	/* Commit overlay settings */
	for (i = 0; i < num_ovls; ++i) {
		ovl = omap_dss_get_overlay(i);
		oc = &dss_cache.overlay_cache[i];
		mc = &dss_cache.manager_cache[oc->channel];

		if (!oc->dirty)
			continue;

		/* manual update managers are only committed from
		 * dss_mgr_start_update() (do_manual_update set) */
		if (mc->manual_update && !mc->do_manual_update)
			continue;

		if (mgr_busy[oc->channel]) {
			busy = true;
			continue;
		}

		r = dss_ovl_write_regs(ovl);
		if (r)
			DSSERR("dss_ovl_write_regs %d failed\n", i);

		oc->dirty = false;
		oc->shadow_dirty = true;
		mgr_go[oc->channel] = true;
	}

	/* Commit manager settings */
	for (i = 0; i < num_mgrs; ++i) {
		mgr = omap_dss_get_overlay_manager(i);
		mc = &dss_cache.manager_cache[i];

		if (!mc->dirty)
			continue;

		if (mc->manual_update && !mc->do_manual_update)
			continue;

		if (mgr_busy[i]) {
			busy = true;
			continue;
		}

		dss_mgr_write_regs(mgr);
		mc->dirty = false;
		mc->shadow_dirty = true;
		mgr_go[i] = true;
	}

	/* set GO */
	for (i = 0; i < num_mgrs; ++i) {
		mc = &dss_cache.manager_cache[i];

		if (!mgr_go[i])
			continue;

		/* We don't need GO with manual update display. LCD iface will
		 * always be turned off after frame, and new settings will be
		 * taken in to use at next update */
		if (!mc->manual_update)
			dispc_mgr_go(i);
	}

	if (busy)
		r = 1;
	else
		r = 0;

	return r;
}
384
385 void dss_mgr_start_update(struct omap_overlay_manager *mgr)
386 {
387         struct manager_cache_data *mc;
388         struct overlay_cache_data *oc;
389         struct omap_overlay *ovl;
390
391         mc = &dss_cache.manager_cache[mgr->id];
392
393         mc->do_manual_update = true;
394         dss_write_regs();
395         mc->do_manual_update = false;
396
397         list_for_each_entry(ovl, &mgr->overlays, list) {
398                 oc = &dss_cache.overlay_cache[ovl->id];
399                 oc->shadow_dirty = false;
400         }
401
402         mc = &dss_cache.manager_cache[mgr->id];
403         mc->shadow_dirty = false;
404
405         dispc_mgr_enable(mgr->id, true);
406 }
407
408 static void dss_apply_irq_handler(void *data, u32 mask);
409
410 static void dss_register_vsync_isr(void)
411 {
412         const int num_mgrs = dss_feat_get_num_mgrs();
413         u32 mask;
414         int r, i;
415
416         mask = 0;
417         for (i = 0; i < num_mgrs; ++i)
418                 mask |= dispc_mgr_get_vsync_irq(i);
419
420         r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
421         WARN_ON(r);
422
423         dss_cache.irq_enabled = true;
424 }
425
426 static void dss_unregister_vsync_isr(void)
427 {
428         const int num_mgrs = dss_feat_get_num_mgrs();
429         u32 mask;
430         int r, i;
431
432         mask = 0;
433         for (i = 0; i < num_mgrs; ++i)
434                 mask |= dispc_mgr_get_vsync_irq(i);
435
436         r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
437         WARN_ON(r);
438
439         dss_cache.irq_enabled = false;
440 }
441
/*
 * VSYNC ISR: clears shadow_dirty flags for managers/overlays whose GO bit
 * has been taken by the HW, retries dss_write_regs() for anything still
 * pending, and unregisters itself once nothing remains to track.
 */
static void dss_apply_irq_handler(void *data, u32 mask)
{
	struct manager_cache_data *mc;
	struct overlay_cache_data *oc;
	const int num_ovls = dss_feat_get_num_ovls();
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i, r;
	bool mgr_busy[MAX_DSS_MANAGERS];

	/* sampled before taking the lock; a clear busy bit means the shadow
	 * registers have been latched into the real registers */
	for (i = 0; i < num_mgrs; i++)
		mgr_busy[i] = dispc_mgr_go_busy(i);

	spin_lock(&dss_cache.lock);

	for (i = 0; i < num_ovls; ++i) {
		oc = &dss_cache.overlay_cache[i];
		if (!mgr_busy[oc->channel])
			oc->shadow_dirty = false;
	}

	for (i = 0; i < num_mgrs; ++i) {
		mc = &dss_cache.manager_cache[i];
		if (!mgr_busy[i])
			mc->shadow_dirty = false;
	}

	/* r == 1 means some managers were still busy; keep the ISR
	 * registered so we try again at the next vsync */
	r = dss_write_regs();
	if (r == 1)
		goto end;

	/* re-read busy flags */
	for (i = 0; i < num_mgrs; i++)
		mgr_busy[i] = dispc_mgr_go_busy(i);

	/* keep running as long as there are busy managers, so that
	 * we can collect overlay-applied information */
	for (i = 0; i < num_mgrs; ++i) {
		if (mgr_busy[i])
			goto end;
	}

	dss_unregister_vsync_isr();

end:
	spin_unlock(&dss_cache.lock);
}
488
/*
 * Copy one overlay's user-visible settings (ovl->info) into the overlay
 * cache, validating them against the display. Called with dss_cache.lock
 * held from omap_dss_mgr_apply().
 *
 * Returns 0 on success or if there was nothing to do, -EINVAL if the
 * settings fail dss_check_overlay() (in which case the overlay is marked
 * disabled in the cache).
 */
static int omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
{
	struct overlay_cache_data *oc;
	struct omap_dss_device *dssdev;

	oc = &dss_cache.overlay_cache[ovl->id];

	/* a manager change invalidates the cached info */
	if (ovl->manager_changed) {
		ovl->manager_changed = false;
		ovl->info_dirty  = true;
	}

	if (!overlay_enabled(ovl)) {
		if (oc->enabled) {
			oc->enabled = false;
			oc->dirty = true;
		}
		return 0;
	}

	if (!ovl->info_dirty)
		return 0;

	dssdev = ovl->manager->device;

	if (dss_check_overlay(ovl, dssdev)) {
		/* invalid settings: disable the overlay instead */
		if (oc->enabled) {
			oc->enabled = false;
			oc->dirty = true;
		}
		return -EINVAL;
	}

	ovl->info_dirty = false;
	oc->dirty = true;
	oc->info = ovl->info;

	oc->channel = ovl->manager->id;

	oc->enabled = true;

	return 0;
}
532
533 static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
534 {
535         struct manager_cache_data *mc;
536
537         mc = &dss_cache.manager_cache[mgr->id];
538
539         if (mgr->device_changed) {
540                 mgr->device_changed = false;
541                 mgr->info_dirty  = true;
542         }
543
544         if (!mgr->info_dirty)
545                 return;
546
547         if (!mgr->device)
548                 return;
549
550         mgr->info_dirty = false;
551         mc->dirty = true;
552         mc->info = mgr->info;
553
554         mc->manual_update = mgr_manual_update(mgr);
555 }
556
/*
 * Compute and cache the FIFO low/high thresholds for one enabled overlay,
 * based on the overlay's FIFO size, burst size and the attached display
 * type. DSI uses its own threshold calculation; all other display types use
 * the default one.
 */
static void omap_dss_mgr_apply_ovl_fifos(struct omap_overlay *ovl)
{
	struct overlay_cache_data *oc;
	struct omap_dss_device *dssdev;
	u32 size, burst_size;

	oc = &dss_cache.overlay_cache[ovl->id];

	if (!oc->enabled)
		return;

	dssdev = ovl->manager->device;

	size = dispc_ovl_get_fifo_size(ovl->id);

	burst_size = dispc_ovl_get_burst_size(ovl->id);

	switch (dssdev->type) {
	case OMAP_DISPLAY_TYPE_DPI:
	case OMAP_DISPLAY_TYPE_DBI:
	case OMAP_DISPLAY_TYPE_SDI:
	case OMAP_DISPLAY_TYPE_VENC:
	case OMAP_DISPLAY_TYPE_HDMI:
		default_get_overlay_fifo_thresholds(ovl->id, size,
				burst_size, &oc->fifo_low,
				&oc->fifo_high);
		break;
#ifdef CONFIG_OMAP2_DSS_DSI
	case OMAP_DISPLAY_TYPE_DSI:
		dsi_get_overlay_fifo_thresholds(ovl->id, size,
				burst_size, &oc->fifo_low,
				&oc->fifo_high);
		break;
#endif
	default:
		/* unknown display type: cache state would be bogus */
		BUG();
	}
}
595
/*
 * Apply the user-visible settings of a manager and all its overlays: copy
 * them into the dss_cache under the lock, then (for auto-update displays)
 * start flushing them to the hardware, registering the vsync ISR to finish
 * the job asynchronously.
 *
 * Returns 0 on success or the error from dispc_runtime_get(). Note that
 * per-overlay validation failures from omap_dss_mgr_apply_ovl() are not
 * propagated; the offending overlay is simply disabled.
 */
int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
	int r;
	unsigned long flags;
	struct omap_overlay *ovl;

	DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);

	/* the DISPC must be powered while we touch its registers */
	r = dispc_runtime_get();
	if (r)
		return r;

	spin_lock_irqsave(&dss_cache.lock, flags);

	/* Configure overlays */
	list_for_each_entry(ovl, &mgr->overlays, list)
		omap_dss_mgr_apply_ovl(ovl);

	/* Configure manager */
	omap_dss_mgr_apply_mgr(mgr);

	/* Configure overlay fifos */
	list_for_each_entry(ovl, &mgr->overlays, list)
		omap_dss_mgr_apply_ovl_fifos(ovl);

	r = 0;
	if (mgr->enabled && !mgr_manual_update(mgr)) {
		if (!dss_cache.irq_enabled)
			dss_register_vsync_isr();

		dss_write_regs();
	}

	spin_unlock_irqrestore(&dss_cache.lock, flags);

	dispc_runtime_put();

	return r;
}
635
/* Enable the manager's output in HW and record the state for apply() */
void dss_mgr_enable(struct omap_overlay_manager *mgr)
{
	dispc_mgr_enable(mgr->id, true);
	mgr->enabled = true;
}
641
/* Disable the manager's output in HW and record the state for apply() */
void dss_mgr_disable(struct omap_overlay_manager *mgr)
{
	dispc_mgr_enable(mgr->id, false);
	mgr->enabled = false;
}
647