/*
 * Copyright (C) 2011 Texas Instruments
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
18 #define DSS_SUBSYS_NAME "APPLY"
20 #include <linux/kernel.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/jiffies.h>
25 #include <video/omapdss.h>
28 #include "dss_features.h"
/*
 * We have 4 levels of cache for the dispc settings. First two are in SW and
 * the latter two in HW.
 *
 * +--------------------+
 * |overlay/manager_info|
 * +--------------------+
 *          v
 *        apply()
 *          v
 * +--------------------+
 * |     dss_cache      |
 * +--------------------+
 *          v
 *      configure()
 *          v
 * +--------------------+
 * |  shadow registers  |
 * +--------------------+
 *          v
 * VFP or lcd/digit_enable
 *          v
 * +--------------------+
 * |     registers      |
 * +--------------------+
 */
/*
 * SW cache entry for one DISPC overlay, protected by dss_cache.lock.
 * Filled by omap_dss_mgr_apply_ovl()/omap_dss_mgr_apply_ovl_fifos() and
 * flushed to the HW shadow registers by dss_ovl_write_regs().
 * NOTE(review): gaps in the residual line numbering indicate several
 * field declarations (the dirty/shadow_dirty flags described by the
 * comments below, plus enabled and fifo_low/fifo_high used elsewhere in
 * this file) were lost from this extract — restore from upstream.
 */
57 struct overlay_cache_data {
58 /* If true, cache changed, but not written to shadow registers. Set
59 * in apply(), cleared when registers written. */
61 /* If true, shadow registers contain changed values not yet in real
62 * registers. Set when writing to shadow registers, cleared at
/* Latest applied overlay configuration (position, size, color mode...). */
68 struct omap_overlay_info info;
/* Which manager/channel this overlay is routed to. */
70 enum omap_channel channel;
/*
 * SW cache entry for one overlay manager, protected by dss_cache.lock.
 * Filled by omap_dss_mgr_apply_mgr() and flushed to the HW shadow
 * registers by dss_mgr_write_regs().
 * NOTE(review): the numbering gaps suggest the dirty/shadow_dirty and
 * manual_update flag declarations referenced elsewhere in this file are
 * missing from this extract — restore from upstream.
 */
76 struct manager_cache_data {
77 /* If true, cache changed, but not written to shadow registers. Set
78 * in apply(), cleared when registers written. */
80 /* If true, shadow registers contain changed values not yet in real
81 * registers. Set when writing to shadow registers, cleared at
/* Latest applied manager configuration. */
85 struct omap_overlay_manager_info info;
/* One-shot override: allow writing regs for a manual-update display,
 * set/cleared by dss_mgr_start_update(). */
88 bool do_manual_update;
/* Per-overlay and per-manager cached state. These fields belong to the
 * file-scope dss_cache container (its declaration head — including the
 * spinlock and irq_enabled flag used throughout this file — is not
 * visible in this extract). */
93 struct overlay_cache_data overlay_cache[MAX_DSS_OVERLAYS];
94 struct manager_cache_data manager_cache[MAX_DSS_MANAGERS];
99 void dss_apply_init(void)
101 spin_lock_init(&dss_cache.lock);
104 static bool ovl_manual_update(struct omap_overlay *ovl)
106 return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
109 static bool mgr_manual_update(struct omap_overlay_manager *mgr)
111 return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
114 static int overlay_enabled(struct omap_overlay *ovl)
116 return ovl->info.enabled && ovl->manager && ovl->manager->device;
/*
 * dss_mgr_wait_for_go() - block until the manager configuration applied
 * earlier has been taken into use by the hardware, i.e. until both the
 * SW-cache dirty flag and the shadow-register dirty flag are clear.
 * Waits on the manager's VSYNC irq, up to 500 ms per iteration.
 * Skips the wait for inactive displays and manual-update displays.
 * Returns 0 on success, -ERESTARTSYS if interrupted, or the irq-wait
 * error on timeout.
 * NOTE(review): gaps in the residual numbering show several lines of
 * this function (loop header, early returns, dirty read) are missing
 * from this extract — restore from upstream before building.
 */
119 int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
121 unsigned long timeout = msecs_to_jiffies(500);
122 struct manager_cache_data *mc;
126 struct omap_dss_device *dssdev = mgr->device;
/* Nothing to wait for if the display is not active. */
128 if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
/* Manual-update displays take new settings at the next update. */
131 if (mgr_manual_update(mgr))
134 irq = dispc_mgr_get_vsync_irq(mgr->id);
136 mc = &dss_cache.manager_cache[mgr->id];
140 bool shadow_dirty, dirty;
/* Sample both dirty flags atomically under the cache lock. */
142 spin_lock_irqsave(&dss_cache.lock, flags);
144 shadow_dirty = mc->shadow_dirty;
145 spin_unlock_irqrestore(&dss_cache.lock, flags);
147 if (!dirty && !shadow_dirty) {
152 /* 4 iterations is the worst case:
153 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
154 * 2 - first VSYNC, dirty = true
155 * 3 - dirty = false, shadow_dirty = true
156 * 4 - shadow_dirty = false */
158 DSSERR("mgr(%d)->wait_for_go() not finishing\n",
/* Sleep until the next VSYNC (or timeout/signal). */
164 r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
165 if (r == -ERESTARTSYS)
169 DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
/*
 * dss_mgr_wait_for_go_ovl() - per-overlay counterpart of
 * dss_mgr_wait_for_go(): block until this overlay's applied settings
 * have reached the hardware. Same VSYNC-wait loop, same 500 ms timeout
 * per iteration, same skip rules for inactive/manual-update displays.
 * NOTE(review): several lines (overlay-has-manager check, loop header,
 * dirty read, returns) are missing from this extract — see upstream.
 */
177 int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
179 unsigned long timeout = msecs_to_jiffies(500);
180 struct overlay_cache_data *oc;
181 struct omap_dss_device *dssdev;
189 dssdev = ovl->manager->device;
/* Nothing to wait for if the display is not active. */
191 if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
/* Manual-update displays take new settings at the next update. */
194 if (ovl_manual_update(ovl))
197 irq = dispc_mgr_get_vsync_irq(ovl->manager->id);
199 oc = &dss_cache.overlay_cache[ovl->id];
203 bool shadow_dirty, dirty;
/* Sample both dirty flags atomically under the cache lock. */
205 spin_lock_irqsave(&dss_cache.lock, flags);
207 shadow_dirty = oc->shadow_dirty;
208 spin_unlock_irqrestore(&dss_cache.lock, flags);
210 if (!dirty && !shadow_dirty) {
215 /* 4 iterations is the worst case:
216 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
217 * 2 - first VSYNC, dirty = true
218 * 3 - dirty = false, shadow_dirty = true
219 * 4 - shadow_dirty = false */
221 DSSERR("ovl(%d)->wait_for_go() not finishing\n",
/* Sleep until the next VSYNC (or timeout/signal). */
227 r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
228 if (r == -ERESTARTSYS)
232 DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
/*
 * dss_ovl_write_regs() - write one overlay's cached settings into the
 * DISPC (shadow) registers: channel routing, full overlay setup, FIFO
 * thresholds, then enable. On setup failure the overlay is disabled.
 * Returns 0 on success or the error from dispc_ovl_setup().
 * NOTE(review): lines are missing from this extract (the disabled-
 * overlay early path around the first dispc_ovl_enable, error return) —
 * restore from upstream.
 */
240 static int dss_ovl_write_regs(struct omap_overlay *ovl)
242 struct overlay_cache_data *c;
243 struct omap_overlay_info *oi;
244 bool ilace, replication;
247 DSSDBGF("%d", ovl->id);
249 c = &dss_cache.overlay_cache[ovl->id];
253 dispc_ovl_enable(ovl->id, 0);
/* Pixel replication depends on the display and the color mode. */
257 replication = dss_use_replication(ovl->manager->device, oi->color_mode);
/* Interlacing is only used for TV-out (VENC). */
259 ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;
261 dispc_ovl_set_channel_out(ovl->id, c->channel);
263 r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
/* Settings were validated at apply time, so failure is unexpected. */
265 /* this shouldn't happen */
266 DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);
267 dispc_ovl_enable(ovl->id, 0);
271 dispc_ovl_set_fifo_threshold(ovl->id, c->fifo_low, c->fifo_high);
273 dispc_ovl_enable(ovl->id, 1);
278 static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
280 struct omap_overlay_manager_info *mi;
282 DSSDBGF("%d", mgr->id);
284 mi = &dss_cache.manager_cache[mgr->id].info;
286 dispc_mgr_setup(mgr->id, mi);
289 /* dss_write_regs() tries to write values from cache to shadow registers.
290 * It writes only to those managers/overlays that are not busy.
291 * returns 0 if everything could be written to shadow registers.
292 * returns 1 if not everything could be written to shadow registers. */
/* Must be called with dss_cache.lock held.
 * NOTE(review): this extract has large numbering gaps — dirty checks,
 * mgr_go initialisation, loop-close braces and the GO-bit write at the
 * end are missing; restore from upstream. */
293 static int dss_write_regs(void)
295 struct omap_overlay *ovl;
296 struct omap_overlay_manager *mgr;
297 struct overlay_cache_data *oc;
298 struct manager_cache_data *mc;
299 const int num_ovls = dss_feat_get_num_ovls();
300 const int num_mgrs = dss_feat_get_num_mgrs();
303 bool mgr_busy[MAX_DSS_MANAGERS];
304 bool mgr_go[MAX_DSS_MANAGERS];
/* Snapshot which managers still have a pending GO. */
310 for (i = 0; i < num_mgrs; i++) {
311 mgr_busy[i] = dispc_mgr_go_busy(i);
315 /* Commit overlay settings */
316 for (i = 0; i < num_ovls; ++i) {
317 ovl = omap_dss_get_overlay(i);
318 oc = &dss_cache.overlay_cache[i];
319 mc = &dss_cache.manager_cache[oc->channel];
/* Manual-update managers are only written during start_update(). */
324 if (mc->manual_update && !mc->do_manual_update)
/* Can't touch shadow regs while the manager's GO is pending. */
327 if (mgr_busy[oc->channel]) {
332 r = dss_ovl_write_regs(ovl);
334 DSSERR("dss_ovl_write_regs %d failed\n", i);
337 oc->shadow_dirty = true;
/* This manager now needs a GO to take the new regs into use. */
338 mgr_go[oc->channel] = true;
341 /* Commit manager settings */
342 for (i = 0; i < num_mgrs; ++i) {
343 mgr = omap_dss_get_overlay_manager(i);
344 mc = &dss_cache.manager_cache[i];
349 if (mc->manual_update && !mc->do_manual_update)
357 dss_mgr_write_regs(mgr);
359 mc->shadow_dirty = true;
/* Finally, raise GO for managers whose shadow regs were touched. */
364 for (i = 0; i < num_mgrs; ++i) {
365 mc = &dss_cache.manager_cache[i];
370 /* We don't need GO with manual update display. LCD iface will
371 * always be turned off after frame, and new settings will be
372 * taken in to use at next update */
373 if (!mc->manual_update)
/*
 * dss_mgr_start_update() - kick one frame out on a manual-update
 * display: temporarily set do_manual_update so the cached settings for
 * this manager get written (presumably via dss_write_regs() in the gap
 * between the flag set/clear — NOTE(review): that call is missing from
 * this extract, confirm against upstream), clear the shadow_dirty flags
 * since the manual-update interface takes the settings into use
 * immediately, then enable the manager to start the frame.
 */
385 void dss_mgr_start_update(struct omap_overlay_manager *mgr)
387 struct manager_cache_data *mc;
388 struct overlay_cache_data *oc;
389 struct omap_overlay *ovl;
391 mc = &dss_cache.manager_cache[mgr->id];
/* One-shot window during which dss_write_regs() may write this
 * manual-update manager. */
393 mc->do_manual_update = true;
395 mc->do_manual_update = false;
397 list_for_each_entry(ovl, &mgr->overlays, list) {
398 oc = &dss_cache.overlay_cache[ovl->id];
399 oc->shadow_dirty = false;
402 mc = &dss_cache.manager_cache[mgr->id];
403 mc->shadow_dirty = false;
/* Start the frame transfer. */
405 dispc_mgr_enable(mgr->id, true);
/* Forward declaration: the ISR is registered below but defined later. */
408 static void dss_apply_irq_handler(void *data, u32 mask);
/*
 * dss_register_vsync_isr() - register dss_apply_irq_handler() for the
 * VSYNC irqs of every manager, and mark it enabled in dss_cache.
 * NOTE(review): local declarations and the error check on the register
 * call are missing from this extract.
 */
410 static void dss_register_vsync_isr(void)
412 const int num_mgrs = dss_feat_get_num_mgrs();
/* Build a combined mask of all managers' VSYNC irqs. */
417 for (i = 0; i < num_mgrs; ++i)
418 mask |= dispc_mgr_get_vsync_irq(i);
420 r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
423 dss_cache.irq_enabled = true;
/*
 * dss_unregister_vsync_isr() - mirror of dss_register_vsync_isr():
 * remove the handler from all managers' VSYNC irqs and clear the
 * irq_enabled flag.
 * NOTE(review): local declarations and the error check are missing from
 * this extract.
 */
426 static void dss_unregister_vsync_isr(void)
428 const int num_mgrs = dss_feat_get_num_mgrs();
/* Same combined VSYNC mask as used at registration time. */
433 for (i = 0; i < num_mgrs; ++i)
434 mask |= dispc_mgr_get_vsync_irq(i);
436 r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
439 dss_cache.irq_enabled = false;
/*
 * dss_apply_irq_handler() - VSYNC interrupt handler. For every manager
 * whose GO bit has cleared, the shadow registers have been taken into
 * use, so the corresponding shadow_dirty flags (overlays and manager)
 * are cleared. It then retries dss_write_regs() for anything still
 * pending, and unregisters itself once no manager is busy and nothing
 * remains to write (the exact unregister condition sits in lines
 * missing from this extract — NOTE(review): confirm against upstream).
 */
442 static void dss_apply_irq_handler(void *data, u32 mask)
444 struct manager_cache_data *mc;
445 struct overlay_cache_data *oc;
446 const int num_ovls = dss_feat_get_num_ovls();
447 const int num_mgrs = dss_feat_get_num_mgrs();
449 bool mgr_busy[MAX_DSS_MANAGERS];
/* Snapshot GO-busy state before taking the lock. */
451 for (i = 0; i < num_mgrs; i++)
452 mgr_busy[i] = dispc_mgr_go_busy(i);
454 spin_lock(&dss_cache.lock);
/* GO cleared => the overlay's shadow regs are now in use. */
456 for (i = 0; i < num_ovls; ++i) {
457 oc = &dss_cache.overlay_cache[i];
458 if (!mgr_busy[oc->channel])
459 oc->shadow_dirty = false;
462 for (i = 0; i < num_mgrs; ++i) {
463 mc = &dss_cache.manager_cache[i];
465 mc->shadow_dirty = false;
/* Try to flush any remaining cached changes. */
468 r = dss_write_regs();
472 /* re-read busy flags */
473 for (i = 0; i < num_mgrs; i++)
474 mgr_busy[i] = dispc_mgr_go_busy(i);
476 /* keep running as long as there are busy managers, so that
477 * we can collect overlay-applied information */
478 for (i = 0; i < num_mgrs; ++i) {
/* Nothing pending anywhere: stop taking VSYNC interrupts. */
483 dss_unregister_vsync_isr();
486 spin_unlock(&dss_cache.lock);
/*
 * omap_dss_mgr_apply_ovl() - copy one overlay's user-set configuration
 * into its SW cache entry, after validating it with dss_check_overlay().
 * A manager change forces the info to be treated as dirty. Disabled or
 * invalid overlays take an early path (in lines missing from this
 * extract — NOTE(review): confirm against upstream).
 * Called under dss_cache.lock from omap_dss_mgr_apply().
 */
489 static int omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
491 struct overlay_cache_data *oc;
492 struct omap_dss_device *dssdev;
494 oc = &dss_cache.overlay_cache[ovl->id];
/* Moving to another manager invalidates the cached config. */
496 if (ovl->manager_changed) {
497 ovl->manager_changed = false;
498 ovl->info_dirty = true;
501 if (!overlay_enabled(ovl)) {
/* Nothing new to apply. */
509 if (!ovl->info_dirty)
512 dssdev = ovl->manager->device;
/* Reject configs the display/hardware cannot support. */
514 if (dss_check_overlay(ovl, dssdev)) {
522 ovl->info_dirty = false;
524 oc->info = ovl->info;
526 oc->channel = ovl->manager->id;
/*
 * omap_dss_mgr_apply_mgr() - copy the manager's user-set configuration
 * into its SW cache entry and refresh the manual_update flag. A device
 * change forces the info to be treated as dirty.
 * Called under dss_cache.lock from omap_dss_mgr_apply().
 */
533 static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
535 struct manager_cache_data *mc;
537 mc = &dss_cache.manager_cache[mgr->id];
/* Switching displays invalidates the cached config. */
539 if (mgr->device_changed) {
540 mgr->device_changed = false;
541 mgr->info_dirty = true;
/* Nothing new to apply. */
544 if (!mgr->info_dirty)
550 mgr->info_dirty = false;
552 mc->info = mgr->info;
/* Cache whether the attached display is manual-update. */
554 mc->manual_update = mgr_manual_update(mgr);
/*
 * omap_dss_mgr_apply_ovl_fifos() - compute and cache the FIFO low/high
 * thresholds for one overlay, based on the overlay's FIFO size and DMA
 * burst size. DSI displays use their own threshold helper; every other
 * display type uses the default one.
 * Called under dss_cache.lock from omap_dss_mgr_apply().
 * NOTE(review): the enabled-check early return, the fifo_high argument
 * lines, break statements and switch close are missing from this
 * extract — restore from upstream.
 */
557 static void omap_dss_mgr_apply_ovl_fifos(struct omap_overlay *ovl)
559 struct overlay_cache_data *oc;
560 struct omap_dss_device *dssdev;
561 u32 size, burst_size;
563 oc = &dss_cache.overlay_cache[ovl->id];
568 dssdev = ovl->manager->device;
570 size = dispc_ovl_get_fifo_size(ovl->id);
572 burst_size = dispc_ovl_get_burst_size(ovl->id);
/* Threshold policy differs only for DSI command-mode displays. */
574 switch (dssdev->type) {
575 case OMAP_DISPLAY_TYPE_DPI:
576 case OMAP_DISPLAY_TYPE_DBI:
577 case OMAP_DISPLAY_TYPE_SDI:
578 case OMAP_DISPLAY_TYPE_VENC:
579 case OMAP_DISPLAY_TYPE_HDMI:
580 default_get_overlay_fifo_thresholds(ovl->id, size,
581 burst_size, &oc->fifo_low,
584 #ifdef CONFIG_OMAP2_DSS_DSI
585 case OMAP_DISPLAY_TYPE_DSI:
586 dsi_get_overlay_fifo_thresholds(ovl->id, size,
587 burst_size, &oc->fifo_low,
/*
 * omap_dss_mgr_apply() - public entry point: push the manager's and all
 * of its overlays' user-set configuration into the SW cache (under
 * dss_cache.lock with IRQs disabled), then, for enabled auto-update
 * managers, register the VSYNC ISR on demand so the cached settings
 * reach the hardware at the next VSYNC. Takes a dispc runtime PM
 * reference for the duration.
 * Returns 0 on success or the dispc_runtime_get() error.
 * NOTE(review): the dss_write_regs() call and the runtime_put/return
 * tail are in lines missing from this extract.
 */
596 int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
600 struct omap_overlay *ovl;
602 DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
/* DISPC must be powered to write its registers. */
604 r = dispc_runtime_get();
608 spin_lock_irqsave(&dss_cache.lock, flags);
610 /* Configure overlays */
611 list_for_each_entry(ovl, &mgr->overlays, list)
612 omap_dss_mgr_apply_ovl(ovl);
614 /* Configure manager */
615 omap_dss_mgr_apply_mgr(mgr);
617 /* Configure overlay fifos */
618 list_for_each_entry(ovl, &mgr->overlays, list)
619 omap_dss_mgr_apply_ovl_fifos(ovl);
/* Manual-update displays are flushed from dss_mgr_start_update()
 * instead of from the VSYNC ISR. */
622 if (mgr->enabled && !mgr_manual_update(mgr)) {
623 if (!dss_cache.irq_enabled)
624 dss_register_vsync_isr();
629 spin_unlock_irqrestore(&dss_cache.lock, flags);
/*
 * dss_mgr_enable() - turn the manager's output on in DISPC.
 * NOTE(review): the numbering gap after this line suggests the
 * "mgr->enabled = true;" bookkeeping (mirroring dss_mgr_disable below)
 * is missing from this extract — confirm against upstream.
 */
636 void dss_mgr_enable(struct omap_overlay_manager *mgr)
638 dispc_mgr_enable(mgr->id, true);
/*
 * dss_mgr_disable() - turn the manager's output off in DISPC and record
 * the disabled state on the manager object.
 */
642 void dss_mgr_disable(struct omap_overlay_manager *mgr)
644 dispc_mgr_enable(mgr->id, false);
645 mgr->enabled = false;