2 * Copyright (C) 2011 Texas Instruments
3 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
18 #define DSS_SUBSYS_NAME "APPLY"
20 #include <linux/kernel.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/jiffies.h>
25 #include <video/omapdss.h>
28 #include "dss_features.h"
 * We have 4 levels of cache for the dispc settings. First two are in SW and
 * the latter two in HW.
 *
 * +--------------------+
 * |overlay/manager_info|
 * +--------------------+
 *          v
 *        apply()
 *          v
 * +--------------------+
 * |     dss_cache      |
 * +--------------------+
 *          v
 *    configure_dispc()
 *          v
 * +--------------------+
 * |  shadow registers  |
 * +--------------------+
 *          v
 * VFP or lcd/digit_enable
 *          v
 * +--------------------+
 * |   real registers   |
 * +--------------------+
/*
 * Per-overlay software cache (the first SW level of the pipeline described
 * above). omap_dss_mgr_apply_ovl() fills it from the user-visible
 * omap_overlay.info; configure_overlay() later writes it to the dispc
 * shadow registers.
 *
 * NOTE(review): this extract is missing several members that other code in
 * this file references (the dirty/shadow_dirty flags described by the
 * comments below, an enabled flag, and the fifo_low/fifo_high thresholds
 * used by configure_overlay()) -- confirm against the complete file.
 */
57 struct overlay_cache_data {
58 /* If true, cache changed, but not written to shadow registers. Set
59 * in apply(), cleared when registers written. */
61 /* If true, shadow registers contain changed values not yet in real
62 * registers. Set when writing to shadow registers, cleared at
68 struct omap_overlay_info info; /* cached user-visible overlay configuration */
70 enum omap_channel channel; /* output channel (manager) this overlay feeds */
/*
 * Per-manager software cache, the manager counterpart of
 * overlay_cache_data. omap_dss_mgr_apply_mgr() fills it;
 * configure_manager() writes it to the dispc shadow registers.
 *
 * NOTE(review): the dirty/shadow_dirty flags described by the comments
 * below, and the manual_update flag set in omap_dss_mgr_apply_mgr(), are
 * not visible in this extract -- confirm against the complete file.
 */
76 struct manager_cache_data {
77 /* If true, cache changed, but not written to shadow registers. Set
78 * in apply(), cleared when registers written. */
80 /* If true, shadow registers contain changed values not yet in real
81 * registers. Set when writing to shadow registers, cleared at
85 struct omap_overlay_manager_info info; /* cached user-visible manager configuration */
88 bool do_manual_update; /* true only while dss_mgr_start_update() pushes a manual-update frame */
/* Cached state arrays, indexed by plane id / channel id. These lines are
 * members of the file-scope dss_cache struct (its declaration, the lock
 * field, and the irq_enabled flag used elsewhere in this file are not
 * visible in this extract). */
93 struct overlay_cache_data overlay_cache[MAX_DSS_OVERLAYS];
94 struct manager_cache_data manager_cache[MAX_DSS_MANAGERS];
99 void dss_apply_init(void)
101 spin_lock_init(&dss_cache.lock);
104 static bool ovl_manual_update(struct omap_overlay *ovl)
106 return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
109 static bool mgr_manual_update(struct omap_overlay_manager *mgr)
111 return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
114 static int overlay_enabled(struct omap_overlay *ovl)
116 return ovl->info.enabled && ovl->manager && ovl->manager->device;
/*
 * dss_mgr_wait_for_go() - block until @mgr's pending settings have reached
 * the real registers, i.e. until neither the SW cache nor the shadow
 * registers hold unapplied changes for this manager.
 *
 * Returns 0 on success or -ERESTARTSYS if interrupted; presumably a
 * negative error on timeout (several lines of this function, including the
 * returns and the loop structure, are missing from this extract).
 */
119 int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
121 unsigned long timeout = msecs_to_jiffies(500);
122 struct manager_cache_data *mc;
126 struct omap_dss_device *dssdev = mgr->device;
/* Nothing to wait for without an active display. */
128 if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
/* Manual-update displays have no GO semantics; new settings are taken
 * into use at the next update instead. */
131 if (mgr_manual_update(mgr))
/* Pick the IRQ marking the point where shadow registers get latched:
 * EVSYNC for TV-out (VENC/HDMI), VSYNC/VSYNC2 for the LCD channels. */
134 if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
135 || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
136 irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
138 irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
139 DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
142 mc = &dss_cache.manager_cache[mgr->id];
146 bool shadow_dirty, dirty;
/* Sample both dirty flags atomically w.r.t. dss_apply_irq_handler(). */
148 spin_lock_irqsave(&dss_cache.lock, flags);
150 shadow_dirty = mc->shadow_dirty;
151 spin_unlock_irqrestore(&dss_cache.lock, flags);
/* Done: everything has propagated to the real registers. */
153 if (!dirty && !shadow_dirty) {
158 /* 4 iterations is the worst case:
159 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
160 * 2 - first VSYNC, dirty = true
161 * 3 - dirty = false, shadow_dirty = true
162 * 4 - shadow_dirty = false */
164 DSSERR("mgr(%d)->wait_for_go() not finishing\n",
/* Sleep until the next latch point; interruptible and time-bounded. */
170 r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
171 if (r == -ERESTARTSYS)
175 DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
/*
 * dss_mgr_wait_for_go_ovl() - like dss_mgr_wait_for_go(), but waits until
 * the cached settings of a single overlay @ovl have reached the real
 * registers.
 *
 * Returns 0 on success or -ERESTARTSYS if interrupted; presumably a
 * negative error on timeout (several lines, including the returns and the
 * loop structure, are missing from this extract).
 */
183 int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
185 unsigned long timeout = msecs_to_jiffies(500);
186 struct overlay_cache_data *oc;
187 struct omap_dss_device *dssdev;
195 dssdev = ovl->manager->device;
/* Nothing to wait for without an active display. */
197 if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
/* Manual-update displays take new settings into use at the next update. */
200 if (ovl_manual_update(ovl))
/* Choose the latch-point IRQ: EVSYNC for TV-out, VSYNC/VSYNC2 for LCDs. */
203 if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
204 || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
205 irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
207 irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
208 DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
211 oc = &dss_cache.overlay_cache[ovl->id];
215 bool shadow_dirty, dirty;
/* Sample both flags under the lock, consistent with the irq handler. */
217 spin_lock_irqsave(&dss_cache.lock, flags);
219 shadow_dirty = oc->shadow_dirty;
220 spin_unlock_irqrestore(&dss_cache.lock, flags);
/* Done: everything has propagated to the real registers. */
222 if (!dirty && !shadow_dirty) {
227 /* 4 iterations is the worst case:
228 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
229 * 2 - first VSYNC, dirty = true
230 * 3 - dirty = false, shadow_dirty = true
231 * 4 - shadow_dirty = false */
233 DSSERR("ovl(%d)->wait_for_go() not finishing\n",
/* Sleep until the next latch point; interruptible and time-bounded. */
239 r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
240 if (r == -ERESTARTSYS)
244 DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
/*
 * configure_overlay() - write the cached settings of hardware plane
 * @plane to the dispc shadow registers.
 *
 * NOTE(review): presumably called with dss_cache.lock held, like
 * configure_manager() via configure_dispc() -- the call sites' locking
 * lines are only partly visible here; confirm.
 *
 * Returns 0 on success; if dispc_ovl_setup() fails the plane is disabled
 * and an error is returned.
 */
252 static int configure_overlay(enum omap_plane plane)
254 struct omap_overlay *ovl;
255 struct overlay_cache_data *c;
256 struct omap_overlay_info *oi;
257 bool ilace, replication;
260 DSSDBGF("%d", plane);
262 c = &dss_cache.overlay_cache[plane];
/* Disabled overlay: just turn the plane off, nothing else to program. */
266 dispc_ovl_enable(plane, 0);
270 ovl = omap_dss_get_overlay(plane);
/* Pixel replication depends on both the display device and the
 * overlay's color mode. */
272 replication = dss_use_replication(ovl->manager->device, oi->color_mode);
/* VENC (analog TV) output is interlaced. */
274 ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;
276 dispc_ovl_set_channel_out(plane, c->channel);
278 r = dispc_ovl_setup(plane, oi, ilace, replication);
280 /* this shouldn't happen */
281 DSSERR("dispc_ovl_setup failed for ovl %d\n", plane);
/* Keep the plane off rather than scanning out a half-programmed config. */
282 dispc_ovl_enable(plane, 0);
/* FIFO thresholds were computed in omap_dss_mgr_apply_ovl_fifos(). */
286 dispc_ovl_set_fifo_threshold(plane, c->fifo_low, c->fifo_high);
288 dispc_ovl_enable(plane, 1);
293 static void configure_manager(enum omap_channel channel)
295 struct omap_overlay_manager_info *mi;
297 DSSDBGF("%d", channel);
299 /* picking info from the cache */
300 mi = &dss_cache.manager_cache[channel].info;
302 dispc_mgr_setup(channel, mi);
305 /* configure_dispc() tries to write values from cache to shadow registers.
306 * It writes only to those managers/overlays that are not busy.
307 * returns 0 if everything could be written to shadow registers.
308 * returns 1 if not everything could be written to shadow registers. */
/* NOTE(review): called with dss_cache.lock held by dss_apply_irq_handler();
 * confirm the same holds for call sites not visible in this extract. */
309 static int configure_dispc(void)
311 struct overlay_cache_data *oc;
312 struct manager_cache_data *mc;
313 const int num_ovls = dss_feat_get_num_ovls();
314 const int num_mgrs = dss_feat_get_num_mgrs();
317 bool mgr_busy[MAX_DSS_MANAGERS];
318 bool mgr_go[MAX_DSS_MANAGERS];
/* Snapshot which managers still have their GO bit set; those cannot
 * accept new shadow-register values yet. */
324 for (i = 0; i < num_mgrs; i++) {
325 mgr_busy[i] = dispc_mgr_go_busy(i);
329 /* Commit overlay settings */
330 for (i = 0; i < num_ovls; ++i) {
331 oc = &dss_cache.overlay_cache[i];
332 mc = &dss_cache.manager_cache[oc->channel];
/* For manual-update managers, defer until dss_mgr_start_update()
 * raises do_manual_update. */
337 if (mc->manual_update && !mc->do_manual_update)
/* Manager busy: leave this overlay for a later retry. */
340 if (mgr_busy[oc->channel]) {
345 r = configure_overlay(i);
347 DSSERR("configure_overlay %d failed\n", i);
350 oc->shadow_dirty = true;
/* Remember that this manager's GO bit must be raised below. */
351 mgr_go[oc->channel] = true;
354 /* Commit manager settings */
355 for (i = 0; i < num_mgrs; ++i) {
356 mc = &dss_cache.manager_cache[i];
361 if (mc->manual_update && !mc->do_manual_update)
369 configure_manager(i);
371 mc->shadow_dirty = true;
/* Final pass: presumably sets the HW GO bit for managers flagged in
 * mgr_go[] -- the dispc call itself is on a line missing from this
 * extract; confirm. */
376 for (i = 0; i < num_mgrs; ++i) {
377 mc = &dss_cache.manager_cache[i];
382 /* We don't need GO with manual update display. LCD iface will
383 * always be turned off after frame, and new settings will be
384 * taken in to use at next update */
385 if (!mc->manual_update)
/*
 * dss_mgr_start_update() - push pending cached settings for a
 * manual-update manager @mgr and clear the shadow_dirty flags of the
 * overlays/managers involved. (The register writes between raising and
 * clearing do_manual_update -- presumably a configure_dispc() call -- and
 * the code that actually starts the update are on lines missing from
 * this extract; confirm.)
 */
397 void dss_mgr_start_update(struct omap_overlay_manager *mgr)
399 struct manager_cache_data *mc;
400 struct overlay_cache_data *oc;
401 const int num_ovls = dss_feat_get_num_ovls();
402 const int num_mgrs = dss_feat_get_num_mgrs();
405 mc = &dss_cache.manager_cache[mgr->id];
/* Temporarily allow the configure path to write this manual-update
 * manager's settings. */
407 mc->do_manual_update = true;
409 mc->do_manual_update = false;
/* The frame now goes out with the shadow values, so this manager's
 * overlays are no longer shadow-dirty. */
411 for (i = 0; i < num_ovls; ++i) {
412 oc = &dss_cache.overlay_cache[i];
413 if (oc->channel != mgr->id)
416 oc->shadow_dirty = false;
419 for (i = 0; i < num_mgrs; ++i) {
420 mc = &dss_cache.manager_cache[i];
424 mc->shadow_dirty = false;
/*
 * dss_apply_irq_handler() - VSYNC/EVSYNC ISR that retires shadow_dirty
 * flags once the HW has latched the shadow registers, retries
 * configure_dispc() for anything still pending, and unregisters itself
 * when there is no more work.
 */
430 static void dss_apply_irq_handler(void *data, u32 mask)
432 struct manager_cache_data *mc;
433 struct overlay_cache_data *oc;
434 const int num_ovls = dss_feat_get_num_ovls();
435 const int num_mgrs = dss_feat_get_num_mgrs();
437 bool mgr_busy[MAX_DSS_MANAGERS];
440 for (i = 0; i < num_mgrs; i++)
441 mgr_busy[i] = dispc_mgr_go_busy(i);
/* Plain spin_lock: we are already in interrupt context here. */
443 spin_lock(&dss_cache.lock);
/* A non-busy manager has latched its shadow registers: those values are
 * now in use, so its overlays are no longer shadow-dirty. */
445 for (i = 0; i < num_ovls; ++i) {
446 oc = &dss_cache.overlay_cache[i];
447 if (!mgr_busy[oc->channel])
448 oc->shadow_dirty = false;
451 for (i = 0; i < num_mgrs; ++i) {
452 mc = &dss_cache.manager_cache[i];
454 mc->shadow_dirty = false;
/* Try to flush whatever is still only in the SW cache. */
457 r = configure_dispc();
461 /* re-read busy flags */
462 for (i = 0; i < num_mgrs; i++)
463 mgr_busy[i] = dispc_mgr_go_busy(i);
465 /* keep running as long as there are busy managers, so that
466 * we can collect overlay-applied information */
467 for (i = 0; i < num_mgrs; ++i) {
/* Nothing left to do: stop receiving vsync-type interrupts. The mask
 * here must match the one registered in omap_dss_mgr_apply(). */
472 irq_mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
473 DISPC_IRQ_EVSYNC_EVEN;
474 if (dss_has_feature(FEAT_MGR_LCD2))
475 irq_mask |= DISPC_IRQ_VSYNC2;
477 omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, irq_mask);
478 dss_cache.irq_enabled = false;
481 spin_unlock(&dss_cache.lock);
/*
 * omap_dss_mgr_apply_ovl() - copy @ovl's user-set info into the overlay
 * cache. Helper for omap_dss_mgr_apply(), which calls it under
 * dss_cache.lock. (The return statements are on lines missing from this
 * extract.)
 */
484 static int omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
486 struct overlay_cache_data *oc;
487 struct omap_dss_device *dssdev;
489 oc = &dss_cache.overlay_cache[ovl->id];
/* Moving the overlay to another manager invalidates the cached info. */
491 if (ovl->manager_changed) {
492 ovl->manager_changed = false;
493 ovl->info_dirty = true;
/* Disabled or disconnected overlays are not cached further. */
496 if (!overlay_enabled(ovl)) {
/* No user changes since the last apply: nothing to do. */
504 if (!ovl->info_dirty)
507 dssdev = ovl->manager->device;
/* Reject configurations the hardware/display cannot handle. */
509 if (dss_check_overlay(ovl, dssdev)) {
/* Commit the user settings to the cache. */
517 ovl->info_dirty = false;
519 oc->info = ovl->info;
521 oc->channel = ovl->manager->id;
/*
 * omap_dss_mgr_apply_mgr() - copy @mgr's user-set info into the manager
 * cache and record whether the attached display is manual-update. Helper
 * for omap_dss_mgr_apply(), which calls it under dss_cache.lock.
 */
528 static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
530 struct manager_cache_data *mc;
532 mc = &dss_cache.manager_cache[mgr->id];
/* A display-device change invalidates the cached info. */
534 if (mgr->device_changed) {
535 mgr->device_changed = false;
536 mgr->info_dirty = true;
/* No user changes since the last apply: nothing to do. */
539 if (!mgr->info_dirty)
/* Commit the user settings to the cache. */
545 mgr->info_dirty = false;
547 mc->info = mgr->info;
549 mc->manual_update = mgr_manual_update(mgr);
/*
 * omap_dss_mgr_apply_ovl_fifos() - compute and cache the DMA FIFO
 * low/high thresholds for @ovl, based on the plane's FIFO size, burst
 * size, and the type of display it ultimately drives. The values are
 * later programmed by configure_overlay().
 */
552 static void omap_dss_mgr_apply_ovl_fifos(struct omap_overlay *ovl)
554 struct overlay_cache_data *oc;
555 struct omap_dss_device *dssdev;
556 u32 size, burst_size;
558 oc = &dss_cache.overlay_cache[ovl->id];
563 dssdev = ovl->manager->device;
565 size = dispc_ovl_get_fifo_size(ovl->id);
567 burst_size = dispc_ovl_get_burst_size(ovl->id);
569 switch (dssdev->type) {
/* All non-DSI display types share the default threshold calculation. */
570 case OMAP_DISPLAY_TYPE_DPI:
571 case OMAP_DISPLAY_TYPE_DBI:
572 case OMAP_DISPLAY_TYPE_SDI:
573 case OMAP_DISPLAY_TYPE_VENC:
574 case OMAP_DISPLAY_TYPE_HDMI:
575 default_get_overlay_fifo_thresholds(ovl->id, size,
576 burst_size, &oc->fifo_low,
/* DSI has its own threshold helper -- presumably because of its
 * command-mode update behavior; confirm against the DSI driver. */
579 #ifdef CONFIG_OMAP2_DSS_DSI
580 case OMAP_DISPLAY_TYPE_DSI:
581 dsi_get_overlay_fifo_thresholds(ovl->id, size,
582 burst_size, &oc->fifo_low,
/*
 * omap_dss_mgr_apply() - public entry point: commit all user-visible
 * overlay/manager settings for @mgr into the SW cache and lazily arm the
 * vsync ISR that writes them to the hardware.
 *
 * Returns 0 on success or a negative error; the tail of the function
 * (presumably a configure_dispc() call, dispc_runtime_put() and the
 * return) lies beyond this extract.
 */
591 int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
596 DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
/* dispc must be powered while its registers are accessed. */
598 r = dispc_runtime_get();
602 spin_lock_irqsave(&dss_cache.lock, flags);
604 /* Configure overlays */
605 for (i = 0; i < mgr->num_overlays; ++i) {
606 struct omap_overlay *ovl;
608 ovl = mgr->overlays[i];
/* Skip overlays currently connected to some other manager. */
610 if (ovl->manager != mgr)
613 omap_dss_mgr_apply_ovl(ovl);
616 /* Configure manager */
617 omap_dss_mgr_apply_mgr(mgr);
619 /* Configure overlay fifos */
620 for (i = 0; i < mgr->num_overlays; ++i) {
621 struct omap_overlay *ovl;
623 ovl = mgr->overlays[i];
625 if (ovl->manager != mgr)
628 omap_dss_mgr_apply_ovl_fifos(ovl);
/* Lazily register the vsync ISR that flushes the cache to HW; the mask
 * must match the one used when unregistering in dss_apply_irq_handler(). */
632 if (!dss_cache.irq_enabled) {
635 mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
636 DISPC_IRQ_EVSYNC_EVEN;
637 if (dss_has_feature(FEAT_MGR_LCD2))
638 mask |= DISPC_IRQ_VSYNC2;
640 r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
643 DSSERR("failed to register apply isr\n");
645 dss_cache.irq_enabled = true;
650 spin_unlock_irqrestore(&dss_cache.lock, flags);