2 * linux/drivers/video/omap2/dss/dsi.c
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #define DSS_SUBSYS_NAME "DSI"
22 #include <linux/kernel.h>
24 #include <linux/clk.h>
25 #include <linux/device.h>
26 #include <linux/err.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/mutex.h>
30 #include <linux/semaphore.h>
31 #include <linux/seq_file.h>
32 #include <linux/platform_device.h>
33 #include <linux/regulator/consumer.h>
34 #include <linux/wait.h>
35 #include <linux/workqueue.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/debugfs.h>
40 #include <video/omapdss.h>
41 #include <plat/clock.h>
44 #include "dss_features.h"
46 /*#define VERBOSE_IRQ*/
47 #define DSI_CATCH_MISSING_TE
49 struct dsi_reg { u16 idx; };
51 #define DSI_REG(idx) ((const struct dsi_reg) { idx })
53 #define DSI_SZ_REGS SZ_1K
54 /* DSI Protocol Engine */
56 #define DSI_REVISION DSI_REG(0x0000)
57 #define DSI_SYSCONFIG DSI_REG(0x0010)
58 #define DSI_SYSSTATUS DSI_REG(0x0014)
59 #define DSI_IRQSTATUS DSI_REG(0x0018)
60 #define DSI_IRQENABLE DSI_REG(0x001C)
61 #define DSI_CTRL DSI_REG(0x0040)
62 #define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048)
63 #define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C)
64 #define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050)
65 #define DSI_CLK_CTRL DSI_REG(0x0054)
66 #define DSI_TIMING1 DSI_REG(0x0058)
67 #define DSI_TIMING2 DSI_REG(0x005C)
68 #define DSI_VM_TIMING1 DSI_REG(0x0060)
69 #define DSI_VM_TIMING2 DSI_REG(0x0064)
70 #define DSI_VM_TIMING3 DSI_REG(0x0068)
71 #define DSI_CLK_TIMING DSI_REG(0x006C)
72 #define DSI_TX_FIFO_VC_SIZE DSI_REG(0x0070)
73 #define DSI_RX_FIFO_VC_SIZE DSI_REG(0x0074)
74 #define DSI_COMPLEXIO_CFG2 DSI_REG(0x0078)
75 #define DSI_RX_FIFO_VC_FULLNESS DSI_REG(0x007C)
76 #define DSI_VM_TIMING4 DSI_REG(0x0080)
77 #define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(0x0084)
78 #define DSI_VM_TIMING5 DSI_REG(0x0088)
79 #define DSI_VM_TIMING6 DSI_REG(0x008C)
80 #define DSI_VM_TIMING7 DSI_REG(0x0090)
81 #define DSI_STOPCLK_TIMING DSI_REG(0x0094)
82 #define DSI_VC_CTRL(n) DSI_REG(0x0100 + (n * 0x20))
83 #define DSI_VC_TE(n) DSI_REG(0x0104 + (n * 0x20))
84 #define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(0x0108 + (n * 0x20))
85 #define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(0x010C + (n * 0x20))
86 #define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(0x0110 + (n * 0x20))
87 #define DSI_VC_IRQSTATUS(n) DSI_REG(0x0118 + (n * 0x20))
88 #define DSI_VC_IRQENABLE(n) DSI_REG(0x011C + (n * 0x20))
92 #define DSI_DSIPHY_CFG0 DSI_REG(0x200 + 0x0000)
93 #define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
94 #define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
95 #define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
96 #define DSI_DSIPHY_CFG10 DSI_REG(0x200 + 0x0028)
98 /* DSI_PLL_CTRL_SCP */
100 #define DSI_PLL_CONTROL DSI_REG(0x300 + 0x0000)
101 #define DSI_PLL_STATUS DSI_REG(0x300 + 0x0004)
102 #define DSI_PLL_GO DSI_REG(0x300 + 0x0008)
103 #define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C)
104 #define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010)
106 #define REG_GET(dsidev, idx, start, end) \
107 FLD_GET(dsi_read_reg(dsidev, idx), start, end)
109 #define REG_FLD_MOD(dsidev, idx, val, start, end) \
110 dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
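/*
 * Illustrative usage sketch for the two helpers above (not taken from the
 * driver itself): reading the PLL lock bit and gating the CIO interface
 * clock become single-line field accesses:
 *
 *	if (REG_GET(dsidev, DSI_PLL_STATUS, 1, 1))
 *		;				(PLL locked)
 *	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14);	(CIO_CLK_ICG)
 *
 * Both field positions are the ones used later in this file.
 */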
112 /* Global interrupts */
113 #define DSI_IRQ_VC0 (1 << 0)
114 #define DSI_IRQ_VC1 (1 << 1)
115 #define DSI_IRQ_VC2 (1 << 2)
116 #define DSI_IRQ_VC3 (1 << 3)
117 #define DSI_IRQ_WAKEUP (1 << 4)
118 #define DSI_IRQ_RESYNC (1 << 5)
119 #define DSI_IRQ_PLL_LOCK (1 << 7)
120 #define DSI_IRQ_PLL_UNLOCK (1 << 8)
121 #define DSI_IRQ_PLL_RECALL (1 << 9)
122 #define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
123 #define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
124 #define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
125 #define DSI_IRQ_TE_TRIGGER (1 << 16)
126 #define DSI_IRQ_ACK_TRIGGER (1 << 17)
127 #define DSI_IRQ_SYNC_LOST (1 << 18)
128 #define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
129 #define DSI_IRQ_TA_TIMEOUT (1 << 20)
130 #define DSI_IRQ_ERROR_MASK \
131 (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
132 DSI_IRQ_TA_TIMEOUT)
133 #define DSI_IRQ_CHANNEL_MASK 0xf
135 /* Virtual channel interrupts */
136 #define DSI_VC_IRQ_CS (1 << 0)
137 #define DSI_VC_IRQ_ECC_CORR (1 << 1)
138 #define DSI_VC_IRQ_PACKET_SENT (1 << 2)
139 #define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
140 #define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
141 #define DSI_VC_IRQ_BTA (1 << 5)
142 #define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
143 #define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
144 #define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
145 #define DSI_VC_IRQ_ERROR_MASK \
146 (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
147 DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
148 DSI_VC_IRQ_FIFO_TX_UDF)
150 /* ComplexIO interrupts */
151 #define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
152 #define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
153 #define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
154 #define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3)
155 #define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4)
156 #define DSI_CIO_IRQ_ERRESC1 (1 << 5)
157 #define DSI_CIO_IRQ_ERRESC2 (1 << 6)
158 #define DSI_CIO_IRQ_ERRESC3 (1 << 7)
159 #define DSI_CIO_IRQ_ERRESC4 (1 << 8)
160 #define DSI_CIO_IRQ_ERRESC5 (1 << 9)
161 #define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
162 #define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
163 #define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
164 #define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13)
165 #define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14)
166 #define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
167 #define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
168 #define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
169 #define DSI_CIO_IRQ_STATEULPS4 (1 << 18)
170 #define DSI_CIO_IRQ_STATEULPS5 (1 << 19)
171 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
172 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
173 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
174 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
175 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
176 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
177 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26)
178 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27)
179 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28)
180 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29)
181 #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
182 #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
183 #define DSI_CIO_IRQ_ERROR_MASK \
184 (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
185 DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
186 DSI_CIO_IRQ_ERRSYNCESC5 | \
187 DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
188 DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
189 DSI_CIO_IRQ_ERRESC5 | \
190 DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
191 DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
192 DSI_CIO_IRQ_ERRCONTROL5 | \
193 DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
194 DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
195 DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
196 DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
197 DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
199 #define DSI_DT_DCS_SHORT_WRITE_0 0x05
200 #define DSI_DT_DCS_SHORT_WRITE_1 0x15
201 #define DSI_DT_DCS_READ 0x06
202 #define DSI_DT_SET_MAX_RET_PKG_SIZE 0x37
203 #define DSI_DT_NULL_PACKET 0x09
204 #define DSI_DT_DCS_LONG_WRITE 0x39
206 #define DSI_DT_RX_ACK_WITH_ERR 0x02
207 #define DSI_DT_RX_DCS_LONG_READ 0x1c
208 #define DSI_DT_RX_SHORT_READ_1 0x21
209 #define DSI_DT_RX_SHORT_READ_2 0x22
211 typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
213 #define DSI_MAX_NR_ISRS 2
215 struct dsi_isr_data {
223 DSI_FIFO_SIZE_32 = 1,
224 DSI_FIFO_SIZE_64 = 2,
225 DSI_FIFO_SIZE_96 = 3,
226 DSI_FIFO_SIZE_128 = 4,
237 DSI_DATA1_P = 1 << 2,
238 DSI_DATA1_N = 1 << 3,
239 DSI_DATA2_P = 1 << 4,
240 DSI_DATA2_N = 1 << 5,
243 struct dsi_update_region {
245 struct omap_dss_device *device;
248 struct dsi_irq_stats {
249 unsigned long last_reset;
251 unsigned dsi_irqs[32];
252 unsigned vc_irqs[4][32];
253 unsigned cio_irqs[32];
256 struct dsi_isr_tables {
257 struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
258 struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
259 struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
263 struct platform_device *pdev;
267 void (*dsi_mux_pads)(bool enable);
269 struct dsi_clock_info current_cinfo;
271 bool vdds_dsi_enabled;
272 struct regulator *vdds_dsi_reg;
275 enum dsi_vc_mode mode;
276 struct omap_dss_device *dssdev;
277 enum fifo_size fifo_size;
282 struct semaphore bus_lock;
287 struct dsi_isr_tables isr_tables;
288 /* space for a copy used by the interrupt handler */
289 struct dsi_isr_tables isr_tables_copy;
292 struct dsi_update_region update_region;
297 struct workqueue_struct *workqueue;
299 void (*framedone_callback)(int, void *);
300 void *framedone_data;
302 struct delayed_work framedone_timeout_work;
304 #ifdef DSI_CATCH_MISSING_TE
305 struct timer_list te_timer;
308 unsigned long cache_req_pck;
309 unsigned long cache_clk_freq;
310 struct dsi_clock_info cache_cinfo;
313 spinlock_t errors_lock;
315 ktime_t perf_setup_time;
316 ktime_t perf_start_time;
321 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
322 spinlock_t irq_stats_lock;
323 struct dsi_irq_stats irq_stats;
325 /* DSI PLL Parameter Ranges */
326 unsigned long regm_max, regn_max;
327 unsigned long regm_dispc_max, regm_dsi_max;
328 unsigned long fint_min, fint_max;
329 unsigned long lpdiv_max;
331 unsigned scp_clk_refcount;
334 struct dsi_packet_sent_handler_data {
335 struct platform_device *dsidev;
336 struct completion *completion;
339 static struct platform_device *dsi_pdev_map[MAX_NUM_DSI];
342 static bool dsi_perf;
343 module_param_named(dsi_perf, dsi_perf, bool, 0644);
346 static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
348 return dev_get_drvdata(&dsidev->dev);
351 static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
353 return dsi_pdev_map[dssdev->phy.dsi.module];
356 struct platform_device *dsi_get_dsidev_from_id(int module)
358 return dsi_pdev_map[module];
361 static int dsi_get_dsidev_id(struct platform_device *dsidev)
363 /* TEMP: Pass 0 as the dsi module index until the dsi platform device
364 * names are changed to the form "omapdss_dsi.0", "omapdss_dsi.1"
365 * and so on */
366 BUG_ON(dsidev->id != -1);
371 static inline void dsi_write_reg(struct platform_device *dsidev,
372 const struct dsi_reg idx, u32 val)
374 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
376 __raw_writel(val, dsi->base + idx.idx);
379 static inline u32 dsi_read_reg(struct platform_device *dsidev,
380 const struct dsi_reg idx)
382 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
384 return __raw_readl(dsi->base + idx.idx);
388 void dsi_save_context(void)
392 void dsi_restore_context(void)
396 void dsi_bus_lock(struct omap_dss_device *dssdev)
398 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
399 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
401 down(&dsi->bus_lock);
403 EXPORT_SYMBOL(dsi_bus_lock);
405 void dsi_bus_unlock(struct omap_dss_device *dssdev)
407 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
408 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
412 EXPORT_SYMBOL(dsi_bus_unlock);
414 static bool dsi_bus_is_locked(struct platform_device *dsidev)
416 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
418 return dsi->bus_lock.count == 0;
421 static void dsi_completion_handler(void *data, u32 mask)
423 complete((struct completion *)data);
426 static inline int wait_for_bit_change(struct platform_device *dsidev,
427 const struct dsi_reg idx, int bitnum, int value)
431 while (REG_GET(dsidev, idx, bitnum, bitnum) != value) {
440 static void dsi_perf_mark_setup(struct platform_device *dsidev)
442 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
443 dsi->perf_setup_time = ktime_get();
446 static void dsi_perf_mark_start(struct platform_device *dsidev)
448 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
449 dsi->perf_start_time = ktime_get();
452 static void dsi_perf_show(struct platform_device *dsidev, const char *name)
454 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
455 ktime_t t, setup_time, trans_time;
457 u32 setup_us, trans_us, total_us;
464 setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
465 setup_us = (u32)ktime_to_us(setup_time);
469 trans_time = ktime_sub(t, dsi->perf_start_time);
470 trans_us = (u32)ktime_to_us(trans_time);
474 total_us = setup_us + trans_us;
476 total_bytes = dsi->update_region.w *
477 dsi->update_region.h *
478 dsi->update_region.device->ctrl.pixel_size / 8;
480 printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
481 "%u bytes, %u kbytes/sec\n",
486 1000*1000 / total_us,
488 total_bytes * 1000 / total_us);
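/*
 * Worked example of the figures printed above (illustrative numbers only):
 * an update of 300000 bytes with 20000 us of transfer time prints
 * 1000*1000 / 20000 = 50 Hz and 300000 * 1000 / 20000 = 15000 kbytes/sec,
 * since bytes per microsecond multiplied by 1000 equals kbytes per second.
 */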
491 #define dsi_perf_mark_setup(x)
492 #define dsi_perf_mark_start(x)
493 #define dsi_perf_show(x, y)
496 static void print_irq_status(u32 status)
502 if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
505 printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);
508 if (status & DSI_IRQ_##x) \
534 static void print_irq_status_vc(int channel, u32 status)
540 if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
543 printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);
546 if (status & DSI_VC_IRQ_##x) \
563 static void print_irq_status_cio(u32 status)
568 printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
571 if (status & DSI_CIO_IRQ_##x) \
585 PIS(ERRCONTENTIONLP0_1);
586 PIS(ERRCONTENTIONLP1_1);
587 PIS(ERRCONTENTIONLP0_2);
588 PIS(ERRCONTENTIONLP1_2);
589 PIS(ERRCONTENTIONLP0_3);
590 PIS(ERRCONTENTIONLP1_3);
591 PIS(ULPSACTIVENOT_ALL0);
592 PIS(ULPSACTIVENOT_ALL1);
598 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
599 static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
600 u32 *vcstatus, u32 ciostatus)
602 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
605 spin_lock(&dsi->irq_stats_lock);
607 dsi->irq_stats.irq_count++;
608 dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);
610 for (i = 0; i < 4; ++i)
611 dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);
613 dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);
615 spin_unlock(&dsi->irq_stats_lock);
618 #define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
621 static int debug_irq;
623 static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
624 u32 *vcstatus, u32 ciostatus)
626 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
629 if (irqstatus & DSI_IRQ_ERROR_MASK) {
630 DSSERR("DSI error, irqstatus %x\n", irqstatus);
631 print_irq_status(irqstatus);
632 spin_lock(&dsi->errors_lock);
633 dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
634 spin_unlock(&dsi->errors_lock);
635 } else if (debug_irq) {
636 print_irq_status(irqstatus);
639 for (i = 0; i < 4; ++i) {
640 if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
641 DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
643 print_irq_status_vc(i, vcstatus[i]);
644 } else if (debug_irq) {
645 print_irq_status_vc(i, vcstatus[i]);
649 if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
650 DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
651 print_irq_status_cio(ciostatus);
652 } else if (debug_irq) {
653 print_irq_status_cio(ciostatus);
657 static void dsi_call_isrs(struct dsi_isr_data *isr_array,
658 unsigned isr_array_size, u32 irqstatus)
660 struct dsi_isr_data *isr_data;
663 for (i = 0; i < isr_array_size; i++) {
664 isr_data = &isr_array[i];
665 if (isr_data->isr && isr_data->mask & irqstatus)
666 isr_data->isr(isr_data->arg, irqstatus);
670 static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
671 u32 irqstatus, u32 *vcstatus, u32 ciostatus)
675 dsi_call_isrs(isr_tables->isr_table,
676 ARRAY_SIZE(isr_tables->isr_table),
679 for (i = 0; i < 4; ++i) {
680 if (vcstatus[i] == 0)
682 dsi_call_isrs(isr_tables->isr_table_vc[i],
683 ARRAY_SIZE(isr_tables->isr_table_vc[i]),
688 dsi_call_isrs(isr_tables->isr_table_cio,
689 ARRAY_SIZE(isr_tables->isr_table_cio),
693 static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
695 struct platform_device *dsidev;
696 struct dsi_data *dsi;
697 u32 irqstatus, vcstatus[4], ciostatus;
700 dsidev = (struct platform_device *) arg;
701 dsi = dsi_get_dsidrv_data(dsidev);
703 spin_lock(&dsi->irq_lock);
705 irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);
707 /* IRQ is not for us */
709 spin_unlock(&dsi->irq_lock);
713 dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
714 /* flush posted write */
715 dsi_read_reg(dsidev, DSI_IRQSTATUS);
717 for (i = 0; i < 4; ++i) {
718 if ((irqstatus & (1 << i)) == 0) {
723 vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
725 dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
726 /* flush posted write */
727 dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
730 if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
731 ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
733 dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
734 /* flush posted write */
735 dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
740 #ifdef DSI_CATCH_MISSING_TE
741 if (irqstatus & DSI_IRQ_TE_TRIGGER)
742 del_timer(&dsi->te_timer);
745 /* make a copy and unlock, so that isrs can unregister
746 * themselves while the copy is being handled */
747 memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
748 sizeof(dsi->isr_tables));
750 spin_unlock(&dsi->irq_lock);
752 dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);
754 dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);
756 dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);
761 /* dsi->irq_lock has to be locked by the caller */
762 static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
763 struct dsi_isr_data *isr_array,
764 unsigned isr_array_size, u32 default_mask,
765 const struct dsi_reg enable_reg,
766 const struct dsi_reg status_reg)
768 struct dsi_isr_data *isr_data;
775 for (i = 0; i < isr_array_size; i++) {
776 isr_data = &isr_array[i];
778 if (isr_data->isr == NULL)
781 mask |= isr_data->mask;
784 old_mask = dsi_read_reg(dsidev, enable_reg);
785 /* clear the irqstatus for newly enabled irqs */
786 dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
787 dsi_write_reg(dsidev, enable_reg, mask);
789 /* flush posted writes */
790 dsi_read_reg(dsidev, enable_reg);
791 dsi_read_reg(dsidev, status_reg);
794 /* dsi->irq_lock has to be locked by the caller */
795 static void _omap_dsi_set_irqs(struct platform_device *dsidev)
797 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
798 u32 mask = DSI_IRQ_ERROR_MASK;
799 #ifdef DSI_CATCH_MISSING_TE
800 mask |= DSI_IRQ_TE_TRIGGER;
802 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
803 ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
804 DSI_IRQENABLE, DSI_IRQSTATUS);
807 /* dsi->irq_lock has to be locked by the caller */
808 static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
810 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
812 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
813 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
814 DSI_VC_IRQ_ERROR_MASK,
815 DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
818 /* dsi->irq_lock has to be locked by the caller */
819 static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
821 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
823 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
824 ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
825 DSI_CIO_IRQ_ERROR_MASK,
826 DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
829 static void _dsi_initialize_irq(struct platform_device *dsidev)
831 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
835 spin_lock_irqsave(&dsi->irq_lock, flags);
837 memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));
839 _omap_dsi_set_irqs(dsidev);
840 for (vc = 0; vc < 4; ++vc)
841 _omap_dsi_set_irqs_vc(dsidev, vc);
842 _omap_dsi_set_irqs_cio(dsidev);
844 spin_unlock_irqrestore(&dsi->irq_lock, flags);
847 static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
848 struct dsi_isr_data *isr_array, unsigned isr_array_size)
850 struct dsi_isr_data *isr_data;
856 /* check for duplicate entry and find a free slot */
858 for (i = 0; i < isr_array_size; i++) {
859 isr_data = &isr_array[i];
861 if (isr_data->isr == isr && isr_data->arg == arg &&
862 isr_data->mask == mask) {
866 if (isr_data->isr == NULL && free_idx == -1)
873 isr_data = &isr_array[free_idx];
876 isr_data->mask = mask;
881 static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
882 struct dsi_isr_data *isr_array, unsigned isr_array_size)
884 struct dsi_isr_data *isr_data;
887 for (i = 0; i < isr_array_size; i++) {
888 isr_data = &isr_array[i];
889 if (isr_data->isr != isr || isr_data->arg != arg ||
890 isr_data->mask != mask)
893 isr_data->isr = NULL;
894 isr_data->arg = NULL;
903 static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
906 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
910 spin_lock_irqsave(&dsi->irq_lock, flags);
912 r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
913 ARRAY_SIZE(dsi->isr_tables.isr_table));
916 _omap_dsi_set_irqs(dsidev);
918 spin_unlock_irqrestore(&dsi->irq_lock, flags);
923 static int dsi_unregister_isr(struct platform_device *dsidev,
924 omap_dsi_isr_t isr, void *arg, u32 mask)
926 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
930 spin_lock_irqsave(&dsi->irq_lock, flags);
932 r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
933 ARRAY_SIZE(dsi->isr_tables.isr_table));
936 _omap_dsi_set_irqs(dsidev);
938 spin_unlock_irqrestore(&dsi->irq_lock, flags);
943 static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
944 omap_dsi_isr_t isr, void *arg, u32 mask)
946 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
950 spin_lock_irqsave(&dsi->irq_lock, flags);
952 r = _dsi_register_isr(isr, arg, mask,
953 dsi->isr_tables.isr_table_vc[channel],
954 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
957 _omap_dsi_set_irqs_vc(dsidev, channel);
959 spin_unlock_irqrestore(&dsi->irq_lock, flags);
964 static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
965 omap_dsi_isr_t isr, void *arg, u32 mask)
967 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
971 spin_lock_irqsave(&dsi->irq_lock, flags);
973 r = _dsi_unregister_isr(isr, arg, mask,
974 dsi->isr_tables.isr_table_vc[channel],
975 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
978 _omap_dsi_set_irqs_vc(dsidev, channel);
980 spin_unlock_irqrestore(&dsi->irq_lock, flags);
985 static int dsi_register_isr_cio(struct platform_device *dsidev,
986 omap_dsi_isr_t isr, void *arg, u32 mask)
988 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
992 spin_lock_irqsave(&dsi->irq_lock, flags);
994 r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
995 ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
998 _omap_dsi_set_irqs_cio(dsidev);
1000 spin_unlock_irqrestore(&dsi->irq_lock, flags);
1005 static int dsi_unregister_isr_cio(struct platform_device *dsidev,
1006 omap_dsi_isr_t isr, void *arg, u32 mask)
1008 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1009 unsigned long flags;
1012 spin_lock_irqsave(&dsi->irq_lock, flags);
1014 r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1015 ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1018 _omap_dsi_set_irqs_cio(dsidev);
1020 spin_unlock_irqrestore(&dsi->irq_lock, flags);
1025 static u32 dsi_get_errors(struct platform_device *dsidev)
1027 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1028 unsigned long flags;
1030 spin_lock_irqsave(&dsi->errors_lock, flags);
1033 spin_unlock_irqrestore(&dsi->errors_lock, flags);
1037 /* DSI func clock. this could also be dsi_pll_hsdiv_dsi_clk */
1038 static inline void enable_clocks(bool enable)
1041 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
1043 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
1046 /* source clock for DSI PLL. this could also be PCLKFREE */
1047 static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
1050 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1053 dss_clk_enable(DSS_CLK_SYSCK);
1055 dss_clk_disable(DSS_CLK_SYSCK);
1057 if (enable && dsi->pll_locked) {
1058 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
1059 DSSERR("cannot lock PLL when enabling clocks\n");
1064 static void _dsi_print_reset_status(struct platform_device *dsidev)
1072 /* A dummy read using the SCP interface to any DSIPHY register is
1073 * required after DSIPHY reset to complete the reset of the DSI complex
1075 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
1077 printk(KERN_DEBUG "DSI resets: ");
1079 l = dsi_read_reg(dsidev, DSI_PLL_STATUS);
1080 printk("PLL (%d) ", FLD_GET(l, 0, 0));
1082 l = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
1083 printk("CIO (%d) ", FLD_GET(l, 29, 29));
1085 if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
1095 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
1096 printk("PHY (%x%x%x, %d, %d, %d)\n",
1102 FLD_GET(l, 31, 31));
1105 #define _dsi_print_reset_status(x)
1108 static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
1110 DSSDBG("dsi_if_enable(%d)\n", enable);
1112 enable = enable ? 1 : 0;
1113 REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */
1115 if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
1116 DSSERR("Failed to set dsi_if_enable to %d\n", enable);
1123 unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
1125 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1127 return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk;
1130 static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
1132 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1134 return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk;
1137 static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
1139 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1141 return dsi->current_cinfo.clkin4ddr / 16;
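/*
 * Illustrative relation between the PLL output and this rate (example
 * figures, not from the driver): with clkin4ddr ~= 464.8 MHz each lane
 * carries ~232.4 Mbps (clkin4ddr / 2), the DDR clock is ~116.2 MHz
 * (clkin4ddr / 4) and TxByteClkHS is ~29.0 MHz (clkin4ddr / 16).
 */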
1144 static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1147 int dsi_module = dsi_get_dsidev_id(dsidev);
1149 if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
1150 /* DSI FCLK source is DSS_CLK_FCK */
1151 r = dss_clk_get_rate(DSS_CLK_FCK);
1153 /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
1154 r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
1160 static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
1162 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1163 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1164 unsigned long dsi_fclk;
1165 unsigned lp_clk_div;
1166 unsigned long lp_clk;
1168 lp_clk_div = dssdev->clocks.dsi.lp_clk_div;
1170 if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
1173 dsi_fclk = dsi_fclk_rate(dsidev);
1175 lp_clk = dsi_fclk / 2 / lp_clk_div;
1177 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
1178 dsi->current_cinfo.lp_clk = lp_clk;
1179 dsi->current_cinfo.lp_clk_div = lp_clk_div;
1181 /* LP_CLK_DIVISOR */
1182 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);
1184 /* LP_RX_SYNCHRO_ENABLE */
1185 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
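/*
 * Example with assumed figures (not from the driver): dsi_fclk = 173 MHz
 * and lp_clk_div = 10 give lp_clk = 173000000 / 2 / 10 = 8.65 MHz, and
 * LP_RX_SYNCHRO_ENABLE is set because dsi_fclk exceeds 30 MHz.
 */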
1190 static void dsi_enable_scp_clk(struct platform_device *dsidev)
1192 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1194 if (dsi->scp_clk_refcount++ == 0)
1195 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
1198 static void dsi_disable_scp_clk(struct platform_device *dsidev)
1200 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1202 WARN_ON(dsi->scp_clk_refcount == 0);
1203 if (--dsi->scp_clk_refcount == 0)
1204 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
1207 enum dsi_pll_power_state {
1208 DSI_PLL_POWER_OFF = 0x0,
1209 DSI_PLL_POWER_ON_HSCLK = 0x1,
1210 DSI_PLL_POWER_ON_ALL = 0x2,
1211 DSI_PLL_POWER_ON_DIV = 0x3,
1214 static int dsi_pll_power(struct platform_device *dsidev,
1215 enum dsi_pll_power_state state)
1219 /* DSI-PLL power command 0x3 is not working */
1220 if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) &&
1221 state == DSI_PLL_POWER_ON_DIV)
1222 state = DSI_PLL_POWER_ON_ALL;
1225 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);
1227 /* PLL_PWR_STATUS */
1228 while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
1230 DSSERR("Failed to set DSI PLL power mode to %d\n",
1240 /* calculate clock rates using dividers in cinfo */
1241 static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1242 struct dsi_clock_info *cinfo)
1244 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1245 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1247 if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
1250 if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max)
1253 if (cinfo->regm_dispc > dsi->regm_dispc_max)
1256 if (cinfo->regm_dsi > dsi->regm_dsi_max)
1259 if (cinfo->use_sys_clk) {
1260 cinfo->clkin = dss_clk_get_rate(DSS_CLK_SYSCK);
1261 /* XXX it is unclear if highfreq should be used
1262 * with DSS_SYS_CLK source also */
1263 cinfo->highfreq = 0;
1265 cinfo->clkin = dispc_pclk_rate(dssdev->manager->id);
1267 if (cinfo->clkin < 32000000)
1268 cinfo->highfreq = 0;
1270 cinfo->highfreq = 1;
1273 cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
1275 if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
1278 cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
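/*
 * Example with assumed input values: clkin (dss_sys_clk) = 38.4 MHz,
 * regn = 19, highfreq = 0 and regm = 115 give fint = 38400000 / 19
 * ~= 2.02 MHz (inside the 0.75-2.1 MHz window) and clkin4ddr =
 * 2 * 115 * 2021052 ~= 464.8 MHz, well below the 1.8 GHz limit below.
 */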
1280 if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
1283 if (cinfo->regm_dispc > 0)
1284 cinfo->dsi_pll_hsdiv_dispc_clk =
1285 cinfo->clkin4ddr / cinfo->regm_dispc;
1287 cinfo->dsi_pll_hsdiv_dispc_clk = 0;
1289 if (cinfo->regm_dsi > 0)
1290 cinfo->dsi_pll_hsdiv_dsi_clk =
1291 cinfo->clkin4ddr / cinfo->regm_dsi;
1293 cinfo->dsi_pll_hsdiv_dsi_clk = 0;
1298 int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
1299 unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
1300 struct dispc_clock_info *dispc_cinfo)
1302 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1303 struct dsi_clock_info cur, best;
1304 struct dispc_clock_info best_dispc;
1305 int min_fck_per_pck;
1307 unsigned long dss_sys_clk, max_dss_fck;
1309 dss_sys_clk = dss_clk_get_rate(DSS_CLK_SYSCK);
1311 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
1313 if (req_pck == dsi->cache_req_pck &&
1314 dsi->cache_cinfo.clkin == dss_sys_clk) {
1315 DSSDBG("DSI clock info found from cache\n");
1316 *dsi_cinfo = dsi->cache_cinfo;
1317 dispc_find_clk_divs(is_tft, req_pck,
1318 dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo);
1322 min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
1324 if (min_fck_per_pck &&
1325 req_pck * min_fck_per_pck > max_dss_fck) {
1326 DSSERR("Requested pixel clock not possible with the current "
1327 "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
1328 "the constraint off.\n");
1329 min_fck_per_pck = 0;
1332 DSSDBG("dsi_pll_calc\n");
1335 memset(&best, 0, sizeof(best));
1336 memset(&best_dispc, 0, sizeof(best_dispc));
1338 memset(&cur, 0, sizeof(cur));
1339 cur.clkin = dss_sys_clk;
1340 cur.use_sys_clk = 1;
1343 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
1344 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
1345 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
1346 for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
1347 if (cur.highfreq == 0)
1348 cur.fint = cur.clkin / cur.regn;
1350 cur.fint = cur.clkin / (2 * cur.regn);
1352 if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
1355 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
1356 for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
1359 a = 2 * cur.regm * (cur.clkin/1000);
1360 b = cur.regn * (cur.highfreq + 1);
1361 cur.clkin4ddr = a / b * 1000;
1363 if (cur.clkin4ddr > 1800 * 1000 * 1000)
1366 /* dsi_pll_hsdiv_dispc_clk(MHz) =
1367 * DSIPHY(MHz) / regm_dispc < 173MHz/186Mhz */
1368 for (cur.regm_dispc = 1; cur.regm_dispc <
1369 dsi->regm_dispc_max; ++cur.regm_dispc) {
1370 struct dispc_clock_info cur_dispc;
1371 cur.dsi_pll_hsdiv_dispc_clk =
1372 cur.clkin4ddr / cur.regm_dispc;
1374 /* this will narrow down the search a bit,
1375 * but still give pixclocks below what was
1377 if (cur.dsi_pll_hsdiv_dispc_clk < req_pck)
1380 if (cur.dsi_pll_hsdiv_dispc_clk > max_dss_fck)
1383 if (min_fck_per_pck &&
1384 cur.dsi_pll_hsdiv_dispc_clk <
1385 req_pck * min_fck_per_pck)
1390 dispc_find_clk_divs(is_tft, req_pck,
1391 cur.dsi_pll_hsdiv_dispc_clk,
1394 if (abs(cur_dispc.pck - req_pck) <
1395 abs(best_dispc.pck - req_pck)) {
1397 best_dispc = cur_dispc;
1399 if (cur_dispc.pck == req_pck)
1407 if (min_fck_per_pck) {
1408 DSSERR("Could not find suitable clock settings.\n"
1409 "Turning FCK/PCK constraint off and"
1411 min_fck_per_pck = 0;
1415 DSSERR("Could not find suitable clock settings.\n");
1420 /* dsi_pll_hsdiv_dsi_clk (regm_dsi) is not used */
1422 best.dsi_pll_hsdiv_dsi_clk = 0;
1427 *dispc_cinfo = best_dispc;
1429 dsi->cache_req_pck = req_pck;
1430 dsi->cache_clk_freq = 0;
1431 dsi->cache_cinfo = best;
1436 int dsi_pll_set_clock_div(struct platform_device *dsidev,
1437 struct dsi_clock_info *cinfo)
1439 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1443 u8 regn_start, regn_end, regm_start, regm_end;
1444 u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;
1448 dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk;
1449 dsi->current_cinfo.highfreq = cinfo->highfreq;
1451 dsi->current_cinfo.fint = cinfo->fint;
1452 dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
1453 dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
1454 cinfo->dsi_pll_hsdiv_dispc_clk;
1455 dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk =
1456 cinfo->dsi_pll_hsdiv_dsi_clk;
1458 dsi->current_cinfo.regn = cinfo->regn;
1459 dsi->current_cinfo.regm = cinfo->regm;
1460 dsi->current_cinfo.regm_dispc = cinfo->regm_dispc;
1461 dsi->current_cinfo.regm_dsi = cinfo->regm_dsi;
1463 DSSDBG("DSI Fint %ld\n", cinfo->fint);
1465 DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
1466 cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
1470 /* DSIPHY == CLKIN4DDR */
1471 DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
1475 cinfo->highfreq + 1,
1478 DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
1479 cinfo->clkin4ddr / 1000 / 1000 / 2);
1481 DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
1483 DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
1484 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
1485 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
1486 cinfo->dsi_pll_hsdiv_dispc_clk);
1487 DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
1488 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
1489 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
1490 cinfo->dsi_pll_hsdiv_dsi_clk);
1492 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
1493 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM, &regm_start, &regm_end);
1494 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DISPC, &regm_dispc_start,
1495 &regm_dispc_end);
1496 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
1497 &regm_dsi_end);
1499 /* DSI_PLL_AUTOMODE = manual */
1500 REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0);
1502 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1);
1503 l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
1505 l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
1507 l = FLD_MOD(l, cinfo->regm, regm_start, regm_end);
1509 l = FLD_MOD(l, cinfo->regm_dispc > 0 ? cinfo->regm_dispc - 1 : 0,
1510 regm_dispc_start, regm_dispc_end);
1511 /* DSIPROTO_CLOCK_DIV */
1512 l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
1513 regm_dsi_start, regm_dsi_end);
1514 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l);
1516 BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);
1518 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
1519 f = cinfo->fint < 1000000 ? 0x3 :
1520 cinfo->fint < 1250000 ? 0x4 :
1521 cinfo->fint < 1500000 ? 0x5 :
1522 cinfo->fint < 1750000 ? 0x6 :
1526 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1528 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
1529 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
1530 l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
1531 11, 11); /* DSI_PLL_CLKSEL */
1532 l = FLD_MOD(l, cinfo->highfreq,
1533 12, 12); /* DSI_PLL_HIGHFREQ */
1534 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1535 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1536 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
1537 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1539 REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
1541 if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) {
1542 DSSERR("dsi pll go bit not going down.\n");
1547 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) {
1548 DSSERR("cannot lock PLL\n");
1553 dsi->pll_locked = 1;
1555 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1556 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
1557 l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
1558 l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
1559 l = FLD_MOD(l, 0, 7, 7); /* DSI_PLL_TIGHTPHASELOCK */
1560 l = FLD_MOD(l, 0, 8, 8); /* DSI_PLL_DRIFTGUARDEN */
1561 l = FLD_MOD(l, 0, 10, 9); /* DSI_PLL_LOCKSEL */
1562 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1563 l = FLD_MOD(l, 1, 14, 14); /* DSIPHY_CLKINEN */
1564 l = FLD_MOD(l, 0, 15, 15); /* DSI_BYPASSEN */
1565 l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN */
1566 l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN */
1567 l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
1568 l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
1569 l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
1570 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1572 DSSDBG("PLL config done\n");
1577 int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
1580 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1582 enum dsi_pll_power_state pwstate;
1584 DSSDBG("PLL init\n");
1586 if (dsi->vdds_dsi_reg == NULL) {
1587 struct regulator *vdds_dsi;
1589 vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
1591 if (IS_ERR(vdds_dsi)) {
1592 DSSERR("can't get VDDS_DSI regulator\n");
1593 return PTR_ERR(vdds_dsi);
1596 dsi->vdds_dsi_reg = vdds_dsi;
1600 dsi_enable_pll_clock(dsidev, 1);
1602 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
1604 dsi_enable_scp_clk(dsidev);
1606 if (!dsi->vdds_dsi_enabled) {
1607 r = regulator_enable(dsi->vdds_dsi_reg);
1610 dsi->vdds_dsi_enabled = true;
1613 /* XXX PLL does not come out of reset without this... */
1614 dispc_pck_free_enable(1);
1616 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
1617 DSSERR("PLL not coming out of reset.\n");
1619 dispc_pck_free_enable(0);
1623 /* XXX ... but if left on, we get problems when planes do not
1624 * fill the whole display. No idea about this */
1625 dispc_pck_free_enable(0);
1627 if (enable_hsclk && enable_hsdiv)
1628 pwstate = DSI_PLL_POWER_ON_ALL;
1629 else if (enable_hsclk)
1630 pwstate = DSI_PLL_POWER_ON_HSCLK;
1631 else if (enable_hsdiv)
1632 pwstate = DSI_PLL_POWER_ON_DIV;
1634 pwstate = DSI_PLL_POWER_OFF;
1636 r = dsi_pll_power(dsidev, pwstate);
1641 DSSDBG("PLL init done\n");
1645 if (dsi->vdds_dsi_enabled) {
1646 regulator_disable(dsi->vdds_dsi_reg);
1647 dsi->vdds_dsi_enabled = false;
1650 dsi_disable_scp_clk(dsidev);
1652 dsi_enable_pll_clock(dsidev, 0);
1656 void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
1658 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1660 dsi->pll_locked = 0;
1661 dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
1662 if (disconnect_lanes) {
1663 WARN_ON(!dsi->vdds_dsi_enabled);
1664 regulator_disable(dsi->vdds_dsi_reg);
1665 dsi->vdds_dsi_enabled = false;
1668 dsi_disable_scp_clk(dsidev);
1670 dsi_enable_pll_clock(dsidev, 0);
1672 DSSDBG("PLL uninit done\n");
1675 static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1678 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1679 struct dsi_clock_info *cinfo = &dsi->current_cinfo;
1680 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
1681 int dsi_module = dsi_get_dsidev_id(dsidev);
1683 dispc_clk_src = dss_get_dispc_clk_source();
1684 dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
1688 seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
1690 seq_printf(s, "dsi pll source = %s\n",
1691 cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");
1693 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
1695 seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
1696 cinfo->clkin4ddr, cinfo->regm);
1698 seq_printf(s, "%s (%s)\t%-16luregm_dispc %u\t(%s)\n",
1699 dss_get_generic_clk_source_name(dispc_clk_src),
1700 dss_feat_get_clk_source_name(dispc_clk_src),
1701 cinfo->dsi_pll_hsdiv_dispc_clk,
1703 dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1706 seq_printf(s, "%s (%s)\t%-16luregm_dsi %u\t(%s)\n",
1707 dss_get_generic_clk_source_name(dsi_clk_src),
1708 dss_feat_get_clk_source_name(dsi_clk_src),
1709 cinfo->dsi_pll_hsdiv_dsi_clk,
1711 dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1714 seq_printf(s, "- DSI%d -\n", dsi_module + 1);
1716 seq_printf(s, "dsi fclk source = %s (%s)\n",
1717 dss_get_generic_clk_source_name(dsi_clk_src),
1718 dss_feat_get_clk_source_name(dsi_clk_src));
1720 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
1722 seq_printf(s, "DDR_CLK\t\t%lu\n",
1723 cinfo->clkin4ddr / 4);
1725 seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));
1727 seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
1732 void dsi_dump_clocks(struct seq_file *s)
1734 struct platform_device *dsidev;
1737 for (i = 0; i < MAX_NUM_DSI; i++) {
1738 dsidev = dsi_get_dsidev_from_id(i);
1740 dsi_dump_dsidev_clocks(dsidev, s);
1744 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
1745 static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
1748 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1749 unsigned long flags;
1750 struct dsi_irq_stats stats;
1751 int dsi_module = dsi_get_dsidev_id(dsidev);
1753 spin_lock_irqsave(&dsi->irq_stats_lock, flags);
1755 stats = dsi->irq_stats;
1756 memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
1757 dsi->irq_stats.last_reset = jiffies;
1759 spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
1761 seq_printf(s, "period %u ms\n",
1762 jiffies_to_msecs(jiffies - stats.last_reset));
1764 seq_printf(s, "irqs %d\n", stats.irq_count);
1766 seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
1768 seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1);
1784 PIS(LDO_POWER_GOOD);
1789 seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
1790 stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
1791 stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
1792 stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
1793 stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
1795 seq_printf(s, "-- VC interrupts --\n");
1804 PIS(PP_BUSY_CHANGE);
1808 seq_printf(s, "%-20s %10d\n", #x, \
1809 stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
1811 seq_printf(s, "-- CIO interrupts --\n");
1824 PIS(ERRCONTENTIONLP0_1);
1825 PIS(ERRCONTENTIONLP1_1);
1826 PIS(ERRCONTENTIONLP0_2);
1827 PIS(ERRCONTENTIONLP1_2);
1828 PIS(ERRCONTENTIONLP0_3);
1829 PIS(ERRCONTENTIONLP1_3);
1830 PIS(ULPSACTIVENOT_ALL0);
1831 PIS(ULPSACTIVENOT_ALL1);
1835 static void dsi1_dump_irqs(struct seq_file *s)
1837 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1839 dsi_dump_dsidev_irqs(dsidev, s);
1842 static void dsi2_dump_irqs(struct seq_file *s)
1844 struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
1846 dsi_dump_dsidev_irqs(dsidev, s);
1849 void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
1850 const struct file_operations *debug_fops)
1852 struct platform_device *dsidev;
1854 dsidev = dsi_get_dsidev_from_id(0);
1856 debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
1857 &dsi1_dump_irqs, debug_fops);
1859 dsidev = dsi_get_dsidev_from_id(1);
1861 debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
1862 &dsi2_dump_irqs, debug_fops);
1866 static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
1869 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
1871 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
1872 dsi_enable_scp_clk(dsidev);
1874 DUMPREG(DSI_REVISION);
1875 DUMPREG(DSI_SYSCONFIG);
1876 DUMPREG(DSI_SYSSTATUS);
1877 DUMPREG(DSI_IRQSTATUS);
1878 DUMPREG(DSI_IRQENABLE);
1880 DUMPREG(DSI_COMPLEXIO_CFG1);
1881 DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
1882 DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
1883 DUMPREG(DSI_CLK_CTRL);
1884 DUMPREG(DSI_TIMING1);
1885 DUMPREG(DSI_TIMING2);
1886 DUMPREG(DSI_VM_TIMING1);
1887 DUMPREG(DSI_VM_TIMING2);
1888 DUMPREG(DSI_VM_TIMING3);
1889 DUMPREG(DSI_CLK_TIMING);
1890 DUMPREG(DSI_TX_FIFO_VC_SIZE);
1891 DUMPREG(DSI_RX_FIFO_VC_SIZE);
1892 DUMPREG(DSI_COMPLEXIO_CFG2);
1893 DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
1894 DUMPREG(DSI_VM_TIMING4);
1895 DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
1896 DUMPREG(DSI_VM_TIMING5);
1897 DUMPREG(DSI_VM_TIMING6);
1898 DUMPREG(DSI_VM_TIMING7);
1899 DUMPREG(DSI_STOPCLK_TIMING);
1901 DUMPREG(DSI_VC_CTRL(0));
1902 DUMPREG(DSI_VC_TE(0));
1903 DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
1904 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
1905 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
1906 DUMPREG(DSI_VC_IRQSTATUS(0));
1907 DUMPREG(DSI_VC_IRQENABLE(0));
1909 DUMPREG(DSI_VC_CTRL(1));
1910 DUMPREG(DSI_VC_TE(1));
1911 DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
1912 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
1913 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
1914 DUMPREG(DSI_VC_IRQSTATUS(1));
1915 DUMPREG(DSI_VC_IRQENABLE(1));
1917 DUMPREG(DSI_VC_CTRL(2));
1918 DUMPREG(DSI_VC_TE(2));
1919 DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
1920 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
1921 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
1922 DUMPREG(DSI_VC_IRQSTATUS(2));
1923 DUMPREG(DSI_VC_IRQENABLE(2));
1925 DUMPREG(DSI_VC_CTRL(3));
1926 DUMPREG(DSI_VC_TE(3));
1927 DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
1928 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
1929 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
1930 DUMPREG(DSI_VC_IRQSTATUS(3));
1931 DUMPREG(DSI_VC_IRQENABLE(3));
1933 DUMPREG(DSI_DSIPHY_CFG0);
1934 DUMPREG(DSI_DSIPHY_CFG1);
1935 DUMPREG(DSI_DSIPHY_CFG2);
1936 DUMPREG(DSI_DSIPHY_CFG5);
1938 DUMPREG(DSI_PLL_CONTROL);
1939 DUMPREG(DSI_PLL_STATUS);
1940 DUMPREG(DSI_PLL_GO);
1941 DUMPREG(DSI_PLL_CONFIGURATION1);
1942 DUMPREG(DSI_PLL_CONFIGURATION2);
1944 dsi_disable_scp_clk(dsidev);
1945 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
1949 static void dsi1_dump_regs(struct seq_file *s)
1951 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1953 dsi_dump_dsidev_regs(dsidev, s);
1956 static void dsi2_dump_regs(struct seq_file *s)
1958 struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
1960 dsi_dump_dsidev_regs(dsidev, s);
1963 void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
1964 const struct file_operations *debug_fops)
1966 struct platform_device *dsidev;
1968 dsidev = dsi_get_dsidev_from_id(0);
1970 debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir,
1971 &dsi1_dump_regs, debug_fops);
1973 dsidev = dsi_get_dsidev_from_id(1);
1975 debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir,
1976 &dsi2_dump_regs, debug_fops);
1978 enum dsi_cio_power_state {
1979 DSI_COMPLEXIO_POWER_OFF = 0x0,
1980 DSI_COMPLEXIO_POWER_ON = 0x1,
1981 DSI_COMPLEXIO_POWER_ULPS = 0x2,
1984 static int dsi_cio_power(struct platform_device *dsidev,
1985 enum dsi_cio_power_state state)
1990 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);
1993 while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
1996 DSSERR("failed to set complexio power state to "
2006 static void dsi_set_lane_config(struct omap_dss_device *dssdev)
2008 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2011 int clk_lane = dssdev->phy.dsi.clk_lane;
2012 int data1_lane = dssdev->phy.dsi.data1_lane;
2013 int data2_lane = dssdev->phy.dsi.data2_lane;
2014 int clk_pol = dssdev->phy.dsi.clk_pol;
2015 int data1_pol = dssdev->phy.dsi.data1_pol;
2016 int data2_pol = dssdev->phy.dsi.data2_pol;
2018 r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
2019 r = FLD_MOD(r, clk_lane, 2, 0);
2020 r = FLD_MOD(r, clk_pol, 3, 3);
2021 r = FLD_MOD(r, data1_lane, 6, 4);
2022 r = FLD_MOD(r, data1_pol, 7, 7);
2023 r = FLD_MOD(r, data2_lane, 10, 8);
2024 r = FLD_MOD(r, data2_pol, 11, 11);
2025 dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
2027 /* The configuration of the DSI complex I/O (number of data lanes,
2028 position, differential order) should not be changed while
2029 DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE bit is set to 1. In order for
2030 the hardware to take into account a new configuration of the complex
2031 I/O (done in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to
2032 follow this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1,
2033 then reset the DSS.DSI_CTRL[0] IF_EN to 0, then set
2034 DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE to 1 and finally set again the
2035 DSS.DSI_CTRL[0] IF_EN bit to 1. If the sequence is not followed, the
2036 DSI complex I/O configuration is unknown. */
2039 REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
2040 REG_FLD_MOD(dsidev, DSI_CTRL, 0, 0, 0);
2041 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20);
2042 REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
2046 static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
2048 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2050 /* convert time in ns to ddr ticks, rounding up */
2051 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
2052 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
2055 static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
2057 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2059 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
2060 return ddr * 1000 * 1000 / (ddr_clk / 1000);
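/*
 * Worked example for the two helpers above (assumed clock, not from the
 * driver): with clkin4ddr = 300 MHz, ddr_clk = 75 MHz, so
 * ns2ddr(dsidev, 70) = (70 * 75 + 999) / 1000 = 6 ticks and
 * ddr2ns(dsidev, 6) = 6 * 1000 * 1000 / 75000 = 80 ns.
 */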
2063 static void dsi_cio_timings(struct platform_device *dsidev)
2066 u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
2067 u32 tlpx_half, tclk_trail, tclk_zero;
2070 /* calculate timings */
2072 /* 1 * DDR_CLK = 2 * UI */
2074 /* min 40ns + 4*UI, max 85ns + 6*UI */
2075 ths_prepare = ns2ddr(dsidev, 70) + 2;
2077 /* min 145ns + 10*UI */
2078 ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;
2080 /* min max(8*UI, 60ns+4*UI) */
2081 ths_trail = ns2ddr(dsidev, 60) + 5;
2084 ths_exit = ns2ddr(dsidev, 145);
2087 tlpx_half = ns2ddr(dsidev, 25);
2090 tclk_trail = ns2ddr(dsidev, 60) + 2;
2092 /* min 38ns, max 95ns */
2093 tclk_prepare = ns2ddr(dsidev, 65);
2095 /* min tclk-prepare + tclk-zero = 300ns */
2096 tclk_zero = ns2ddr(dsidev, 260);
2098 DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
2099 ths_prepare, ddr2ns(dsidev, ths_prepare),
2100 ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
2101 DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
2102 ths_trail, ddr2ns(dsidev, ths_trail),
2103 ths_exit, ddr2ns(dsidev, ths_exit));
2105 DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
2106 "tclk_zero %u (%uns)\n",
2107 tlpx_half, ddr2ns(dsidev, tlpx_half),
2108 tclk_trail, ddr2ns(dsidev, tclk_trail),
2109 tclk_zero, ddr2ns(dsidev, tclk_zero));
2110 DSSDBG("tclk_prepare %u (%uns)\n",
2111 tclk_prepare, ddr2ns(dsidev, tclk_prepare));
2113 /* program timings */
2115 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
2116 r = FLD_MOD(r, ths_prepare, 31, 24);
2117 r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
2118 r = FLD_MOD(r, ths_trail, 15, 8);
2119 r = FLD_MOD(r, ths_exit, 7, 0);
2120 dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);
2122 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
2123 r = FLD_MOD(r, tlpx_half, 22, 16);
2124 r = FLD_MOD(r, tclk_trail, 15, 8);
2125 r = FLD_MOD(r, tclk_zero, 7, 0);
2126 dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);
2128 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
2129 r = FLD_MOD(r, tclk_prepare, 7, 0);
2130 dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
2133 static void dsi_cio_enable_lane_override(struct omap_dss_device *dssdev,
2134 enum dsi_lane lanes)
2136 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2137 int clk_lane = dssdev->phy.dsi.clk_lane;
2138 int data1_lane = dssdev->phy.dsi.data1_lane;
2139 int data2_lane = dssdev->phy.dsi.data2_lane;
2140 int clk_pol = dssdev->phy.dsi.clk_pol;
2141 int data1_pol = dssdev->phy.dsi.data1_pol;
2142 int data2_pol = dssdev->phy.dsi.data2_pol;
2146 if (lanes & DSI_CLK_P)
2147 l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 0 : 1));
2148 if (lanes & DSI_CLK_N)
2149 l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 1 : 0));
2151 if (lanes & DSI_DATA1_P)
2152 l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 0 : 1));
2153 if (lanes & DSI_DATA1_N)
2154 l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 1 : 0));
2156 if (lanes & DSI_DATA2_P)
2157 l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 0 : 1));
2158 if (lanes & DSI_DATA2_N)
2159 l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 1 : 0));
2162 * Bits in REGLPTXSCPDAT4TO0DXDY:
2168 /* Set the lane override configuration */
2170 /* REGLPTXSCPDAT4TO0DXDY */
2171 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, 22, 17);
2173 /* Enable lane override */
2176 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
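/*
 * Example of the bit mapping above (assumed lane setup, not from the
 * driver): with clk_lane = 1, clk_pol = 0 and data1_lane = 2,
 * data1_pol = 0, requesting DSI_CLK_P | DSI_DATA1_P sets override bits
 * 1 << 1 and 1 << 3 in REGLPTXSCPDAT4TO0DXDY.
 */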
2179 static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
2181 /* Disable lane override */
2182 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
2183 /* Reset the lane override configuration */
2184 /* REGLPTXSCPDAT4TO0DXDY */
2185 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
2188 static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev)
2190 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2195 if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
2209 if (dssdev->phy.dsi.clk_lane != 0)
2210 in_use[dssdev->phy.dsi.clk_lane - 1] = true;
2211 if (dssdev->phy.dsi.data1_lane != 0)
2212 in_use[dssdev->phy.dsi.data1_lane - 1] = true;
2213 if (dssdev->phy.dsi.data2_lane != 0)
2214 in_use[dssdev->phy.dsi.data2_lane - 1] = true;
2222 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2225 for (i = 0; i < 3; ++i) {
2226 if (!in_use[i] || (l & (1 << bits[i])))
2234 for (i = 0; i < 3; ++i) {
2235 if (!in_use[i] || (l & (1 << bits[i])))
2238 DSSERR("CIO TXCLKESC%d domain not coming " \
2239 "out of reset\n", i);
2248 static int dsi_cio_init(struct omap_dss_device *dssdev)
2250 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2251 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2257 if (dsi->dsi_mux_pads)
2258 dsi->dsi_mux_pads(true);
2260 dsi_enable_scp_clk(dsidev);
2262 /* A dummy read using the SCP interface to any DSIPHY register is
2263 * required after DSIPHY reset to complete the reset of the DSI complex
2265 dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2267 if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) {
2268 DSSERR("CIO SCP Clock domain not coming out of reset.\n");
2270 goto err_scp_clk_dom;
2273 dsi_set_lane_config(dssdev);
2275 /* set TX STOP MODE timer to maximum for this operation */
2276 l = dsi_read_reg(dsidev, DSI_TIMING1);
2277 l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2278 l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */
2279 l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */
2280 l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
2281 dsi_write_reg(dsidev, DSI_TIMING1, l);
2283 if (dsi->ulps_enabled) {
2284 DSSDBG("manual ulps exit\n");
2286 /* ULPS is exited by Mark-1 state for 1ms, followed by
2287 * stop state. DSS HW cannot do this via the normal
2288 * ULPS exit sequence, as after reset the DSS HW thinks
2289 * that we are not in ULPS mode, and refuses to send the
2290 * sequence. So we need to send the ULPS exit sequence
2291 * manually, using the lane override below to drive the positive
2292 * lines high for the required period. */
2294 dsi_cio_enable_lane_override(dssdev,
2295 DSI_CLK_P | DSI_DATA1_P | DSI_DATA2_P);
2298 r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
2302 if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
2303 DSSERR("CIO PWR clock domain not coming out of reset.\n");
2305 goto err_cio_pwr_dom;
2308 dsi_if_enable(dsidev, true);
2309 dsi_if_enable(dsidev, false);
2310 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
2312 r = dsi_cio_wait_tx_clk_esc_reset(dssdev);
2314 goto err_tx_clk_esc_rst;
2316 if (dsi->ulps_enabled) {
2317 /* Keep Mark-1 state for 1ms (as per DSI spec) */
2318 ktime_t wait = ns_to_ktime(1000 * 1000);
2319 set_current_state(TASK_UNINTERRUPTIBLE);
2320 schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
2322 /* Disable the override. The lanes should be set to Mark-11
2323 * state by the HW */
2324 dsi_cio_disable_lane_override(dsidev);
2327 /* FORCE_TX_STOP_MODE_IO */
2328 REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);
2330 dsi_cio_timings(dsidev);
2332 dsi->ulps_enabled = false;
2334 DSSDBG("CIO init done\n");
2339 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
2341 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2343 if (dsi->ulps_enabled)
2344 dsi_cio_disable_lane_override(dsidev);
2346 dsi_disable_scp_clk(dsidev);
2347 if (dsi->dsi_mux_pads)
2348 dsi->dsi_mux_pads(false);
2352 static void dsi_cio_uninit(struct platform_device *dsidev)
2354 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2356 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2357 dsi_disable_scp_clk(dsidev);
2358 if (dsi->dsi_mux_pads)
2359 dsi->dsi_mux_pads(false);
2362 static int _dsi_wait_reset(struct platform_device *dsidev)
2366 while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) {
2368 DSSERR("soft reset failed\n");
2377 static int _dsi_reset(struct platform_device *dsidev)
2380 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1);
2381 return _dsi_wait_reset(dsidev);
2384 static void dsi_config_tx_fifo(struct platform_device *dsidev,
2385 enum fifo_size size1, enum fifo_size size2,
2386 enum fifo_size size3, enum fifo_size size4)
2388 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2393 dsi->vc[0].fifo_size = size1;
2394 dsi->vc[1].fifo_size = size2;
2395 dsi->vc[2].fifo_size = size3;
2396 dsi->vc[3].fifo_size = size4;
2398 for (i = 0; i < 4; i++) {
2400 int size = dsi->vc[i].fifo_size;
2402 if (add + size > 4) {
2403 DSSERR("Illegal FIFO configuration\n");
2407 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
2409 /*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
2413 dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
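/* Note: the four VCs carve up one shared TX FIFO. 'size' and the running
 * 'add' offset (written to bits 2:0 as the start of each VC's slice) are in
 * FIFO allocation units, and the add + size > 4 check keeps the total within
 * the four units available. Judging by the fifo_size * 32 * 4 byte check in
 * dsi_vc_send_long(), one unit appears to correspond to 32 words of 4 bytes,
 * i.e. 128 bytes. */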
2416 static void dsi_config_rx_fifo(struct platform_device *dsidev,
2417 enum fifo_size size1, enum fifo_size size2,
2418 enum fifo_size size3, enum fifo_size size4)
2420 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2425 dsi->vc[0].fifo_size = size1;
2426 dsi->vc[1].fifo_size = size2;
2427 dsi->vc[2].fifo_size = size3;
2428 dsi->vc[3].fifo_size = size4;
2430 for (i = 0; i < 4; i++) {
2432 int size = dsi->vc[i].fifo_size;
2434 if (add + size > 4) {
2435 DSSERR("Illegal FIFO configuration\n");
2439 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
2441 /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
2445 dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
2448 static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
2452 r = dsi_read_reg(dsidev, DSI_TIMING1);
2453 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2454 dsi_write_reg(dsidev, DSI_TIMING1, r);
2456 if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) {
2457 DSSERR("TX_STOP bit not going down\n");
2464 static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
2466 return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
2469 static void dsi_packet_sent_handler_vp(void *data, u32 mask)
2471 struct dsi_packet_sent_handler_data *vp_data =
2472 (struct dsi_packet_sent_handler_data *) data;
2473 struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
2474 const int channel = dsi->update_channel;
2475 u8 bit = dsi->te_enabled ? 30 : 31;
2477 if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
2478 complete(vp_data->completion);
2481 static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
2483 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2484 DECLARE_COMPLETION_ONSTACK(completion);
2485 struct dsi_packet_sent_handler_data vp_data = { dsidev, &completion };
2489 bit = dsi->te_enabled ? 30 : 31;
2491 r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2492 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2496 /* Wait for completion only if TE_EN/TE_START is still set */
2497 if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
2498 if (wait_for_completion_timeout(&completion,
2499 msecs_to_jiffies(10)) == 0) {
2500 DSSERR("Failed to complete previous frame transfer\n");
2506 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2507 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2511 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2512 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2517 static void dsi_packet_sent_handler_l4(void *data, u32 mask)
2519 struct dsi_packet_sent_handler_data *l4_data =
2520 (struct dsi_packet_sent_handler_data *) data;
2521 struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
2522 const int channel = dsi->update_channel;
2524 if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
2525 complete(l4_data->completion);
2528 static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
2530 DECLARE_COMPLETION_ONSTACK(completion);
2531 struct dsi_packet_sent_handler_data l4_data = { dsidev, &completion };
2534 r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2535 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2539 /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
2540 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
2541 if (wait_for_completion_timeout(&completion,
2542 msecs_to_jiffies(10)) == 0) {
2543 DSSERR("Failed to complete previous l4 transfer\n");
2549 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2550 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2554 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2555 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2560 static int dsi_sync_vc(struct platform_device *dsidev, int channel)
2562 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2564 WARN_ON(!dsi_bus_is_locked(dsidev));
2566 WARN_ON(in_interrupt());
2568 if (!dsi_vc_is_enabled(dsidev, channel))
2571 switch (dsi->vc[channel].mode) {
2572 case DSI_VC_MODE_VP:
2573 return dsi_sync_vc_vp(dsidev, channel);
2574 case DSI_VC_MODE_L4:
2575 return dsi_sync_vc_l4(dsidev, channel);
2581 static int dsi_vc_enable(struct platform_device *dsidev, int channel,
2584 DSSDBG("dsi_vc_enable channel %d, enable %d\n",
2587 enable = enable ? 1 : 0;
2589 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);
2591 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel),
2592 0, enable) != enable) {
2593 DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
2600 static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
2604 DSSDBGF("%d", channel);
2606 r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
2608 if (FLD_GET(r, 15, 15)) /* VC_BUSY */
2609 DSSERR("VC(%d) busy when trying to configure it!\n",
2612 r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
2613 r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
2614 r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
2615 r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
2616 r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
2617 r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
2618 r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
2619 if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH))
2620 r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */
2622 r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
2623 r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
2625 dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
2628 static int dsi_vc_config_l4(struct platform_device *dsidev, int channel)
2630 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2632 if (dsi->vc[channel].mode == DSI_VC_MODE_L4)
2635 DSSDBGF("%d", channel);
2637 dsi_sync_vc(dsidev, channel);
2639 dsi_vc_enable(dsidev, channel, 0);
2642 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
2643 DSSERR("vc(%d) busy when trying to config for L4\n", channel);
2647 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
2649 /* DCS_CMD_ENABLE */
2650 if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC))
2651 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 30, 30);
2653 dsi_vc_enable(dsidev, channel, 1);
2655 dsi->vc[channel].mode = DSI_VC_MODE_L4;
2660 static int dsi_vc_config_vp(struct platform_device *dsidev, int channel)
2662 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2664 if (dsi->vc[channel].mode == DSI_VC_MODE_VP)
2667 DSSDBGF("%d", channel);
2669 dsi_sync_vc(dsidev, channel);
2671 dsi_vc_enable(dsidev, channel, 0);
2674 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
2675 DSSERR("vc(%d) busy when trying to config for VP\n", channel);
2679 /* SOURCE, 1 = video port */
2680 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 1, 1);
2682 /* DCS_CMD_ENABLE */
2683 if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC))
2684 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 30, 30);
2686 dsi_vc_enable(dsidev, channel, 1);
2688 dsi->vc[channel].mode = DSI_VC_MODE_VP;
2694 void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
2697 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2699 DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
2701 WARN_ON(!dsi_bus_is_locked(dsidev));
2703 dsi_vc_enable(dsidev, channel, 0);
2704 dsi_if_enable(dsidev, 0);
2706 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);
2708 dsi_vc_enable(dsidev, channel, 1);
2709 dsi_if_enable(dsidev, 1);
2711 dsi_force_tx_stop_mode_io(dsidev);
2713 EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs);
2715 static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
2717 while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2719 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2720 DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
2724 (val >> 24) & 0xff);
2728 static void dsi_show_rx_ack_with_err(u16 err)
2730 DSSERR("\tACK with ERROR (%#x):\n", err);
2732 DSSERR("\t\tSoT Error\n");
2734 DSSERR("\t\tSoT Sync Error\n");
2736 DSSERR("\t\tEoT Sync Error\n");
2738 DSSERR("\t\tEscape Mode Entry Command Error\n");
2740 DSSERR("\t\tLP Transmit Sync Error\n");
2742 DSSERR("\t\tHS Receive Timeout Error\n");
2744 DSSERR("\t\tFalse Control Error\n");
2746 DSSERR("\t\t(reserved7)\n");
2748 DSSERR("\t\tECC Error, single-bit (corrected)\n");
2750 DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
2751 if (err & (1 << 10))
2752 DSSERR("\t\tChecksum Error\n");
2753 if (err & (1 << 11))
2754 DSSERR("\t\tData type not recognized\n");
2755 if (err & (1 << 12))
2756 DSSERR("\t\tInvalid VC ID\n");
2757 if (err & (1 << 13))
2758 DSSERR("\t\tInvalid Transmission Length\n");
2759 if (err & (1 << 14))
2760 DSSERR("\t\t(reserved14)\n");
2761 if (err & (1 << 15))
2762 DSSERR("\t\tDSI Protocol Violation\n");
2765 static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
2768 /* RX_FIFO_NOT_EMPTY */
2769 while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2772 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2773 DSSERR("\trawval %#08x\n", val);
2774 dt = FLD_GET(val, 5, 0);
2775 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
2776 u16 err = FLD_GET(val, 23, 8);
2777 dsi_show_rx_ack_with_err(err);
2778 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
2779 DSSERR("\tDCS short response, 1 byte: %#x\n",
2780 FLD_GET(val, 23, 8));
2781 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
2782 DSSERR("\tDCS short response, 2 bytes: %#x\n",
2783 FLD_GET(val, 23, 8));
2784 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
2785 DSSERR("\tDCS long response, len %d\n",
2786 FLD_GET(val, 23, 8));
2787 dsi_vc_flush_long_data(dsidev, channel);
2789 DSSERR("\tunknown datatype 0x%02x\n", dt);
2795 static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
2797 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2799 if (dsi->debug_write || dsi->debug_read)
2800 DSSDBG("dsi_vc_send_bta %d\n", channel);
2802 WARN_ON(!dsi_bus_is_locked(dsidev));
2804 /* RX_FIFO_NOT_EMPTY */
2805 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2806 DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
2807 dsi_vc_flush_receive_data(dsidev, channel);
2810 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
2815 int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
2817 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2818 DECLARE_COMPLETION_ONSTACK(completion);
2822 r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
2823 &completion, DSI_VC_IRQ_BTA);
2827 r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
2828 DSI_IRQ_ERROR_MASK);
2832 r = dsi_vc_send_bta(dsidev, channel);
2836 if (wait_for_completion_timeout(&completion,
2837 msecs_to_jiffies(500)) == 0) {
2838 DSSERR("Failed to receive BTA\n");
2843 err = dsi_get_errors(dsidev);
2845 DSSERR("Error while sending BTA: %x\n", err);
2850 dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
2851 DSI_IRQ_ERROR_MASK);
2853 dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
2854 &completion, DSI_VC_IRQ_BTA);
2858 EXPORT_SYMBOL(dsi_vc_send_bta_sync);
2860 static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
2861 int channel, u8 data_type, u16 len, u8 ecc)
2863 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2867 WARN_ON(!dsi_bus_is_locked(dsidev));
2869 data_id = data_type | dsi->vc[channel].vc_id << 6;
2871 val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
2872 FLD_VAL(ecc, 31, 24);
2874 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
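/* Long packet header layout, as assembled above: DataID (virtual channel in
 * bits 7:6, data type in bits 5:0), word count in bits 23:8, ECC in bits
 * 31:24. For illustration: a DCS long write (data type 0x39) on vc_id 0 with
 * a 7-byte payload and no ECC gives the header word 0x00000739. */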
2877 static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
2878 int channel, u8 b1, u8 b2, u8 b3, u8 b4)
2882 val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;
2884 /* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
2885 b1, b2, b3, b4, val); */
2887 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
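/* Payload bytes are packed little-endian: b1 lands in bits 7:0 and is the
 * first payload byte on the wire, which matches the byte order produced by
 * DSI_PUSH() further down in this file. */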
2890 static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
2891 u8 data_type, u8 *data, u16 len, u8 ecc)
2894 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2900 if (dsi->debug_write)
2901 DSSDBG("dsi_vc_send_long, %d bytes\n", len);
2904 if (dsi->vc[channel].fifo_size * 32 * 4 < len + 4) {
2905 DSSERR("unable to send long packet: packet too long.\n");
2909 dsi_vc_config_l4(dsidev, channel);
2911 dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);
2914 for (i = 0; i < len >> 2; i++) {
2915 if (dsi->debug_write)
2916 DSSDBG("\tsending full packet %d\n", i);
2923 dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
2928 b1 = 0; b2 = 0; b3 = 0;
2930 if (dsi->debug_write)
2931 DSSDBG("\tsending remainder bytes %d\n", i);
2948 dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
2954 static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
2955 u8 data_type, u16 data, u8 ecc)
2957 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2961 WARN_ON(!dsi_bus_is_locked(dsidev));
2963 if (dsi->debug_write)
2964 DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
2966 data_type, data & 0xff, (data >> 8) & 0xff);
2968 dsi_vc_config_l4(dsidev, channel);
2970 if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
2971 DSSERR("ERROR FIFO FULL, aborting transfer\n");
2975 data_id = data_type | dsi->vc[channel].vc_id << 6;
2977 r = (data_id << 0) | (data << 8) | (ecc << 24);
2979 dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);
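/* Short packet header: DataID in bits 7:0, the two parameter bytes in bits
 * 23:8, ECC in bits 31:24. E.g. (for illustration) a DCS read (data type
 * 0x06) of command 0x0a on vc_id 0 is written as 0x00000a06. */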
2984 int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
2986 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2987 u8 nullpkg[] = {0, 0, 0, 0};
2989 return dsi_vc_send_long(dsidev, channel, DSI_DT_NULL_PACKET, nullpkg,
2992 EXPORT_SYMBOL(dsi_vc_send_null);
2994 int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
2997 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3003 r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_0,
3005 } else if (len == 2) {
3006 r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_1,
3007 data[0] | (data[1] << 8), 0);
3009 /* 0x39 = DCS Long Write */
3010 r = dsi_vc_send_long(dsidev, channel, DSI_DT_DCS_LONG_WRITE,
3016 EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
3018 int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
3021 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3024 r = dsi_vc_dcs_write_nosync(dssdev, channel, data, len);
3028 r = dsi_vc_send_bta_sync(dssdev, channel);
3032 /* RX_FIFO_NOT_EMPTY */
3033 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
3034 DSSERR("rx fifo not empty after write, dumping data:\n");
3035 dsi_vc_flush_receive_data(dsidev, channel);
3042 DSSERR("dsi_vc_dcs_write(ch %d, cmd 0x%02x, len %d) failed\n",
3043 channel, data[0], len);
3046 EXPORT_SYMBOL(dsi_vc_dcs_write);
3048 int dsi_vc_dcs_write_0(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd)
3050 return dsi_vc_dcs_write(dssdev, channel, &dcs_cmd, 1);
3052 EXPORT_SYMBOL(dsi_vc_dcs_write_0);
3054 int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3060 return dsi_vc_dcs_write(dssdev, channel, buf, 2);
3062 EXPORT_SYMBOL(dsi_vc_dcs_write_1);
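/* A hypothetical usage sketch (not from this file): a panel driver sending
 * DCS set_pixel_format (0x3a) with one parameter byte goes through the
 * len == 2 short-write path of dsi_vc_dcs_write_nosync() via the helper
 * above, e.g.:
 *
 *	r = dsi_vc_dcs_write_1(dssdev, channel, 0x3a, 0x77);
 *
 * where 0x77 is just an example parameter value. */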
3064 int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3065 u8 *buf, int buflen)
3067 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3068 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3073 if (dsi->debug_read)
3074 DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd);
3076 r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_READ, dcs_cmd, 0);
3080 r = dsi_vc_send_bta_sync(dssdev, channel);
3084 /* RX_FIFO_NOT_EMPTY */
3085 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
3086 DSSERR("RX fifo empty when trying to read.\n");
3091 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
3092 if (dsi->debug_read)
3093 DSSDBG("\theader: %08x\n", val);
3094 dt = FLD_GET(val, 5, 0);
3095 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
3096 u16 err = FLD_GET(val, 23, 8);
3097 dsi_show_rx_ack_with_err(err);
3101 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
3102 u8 data = FLD_GET(val, 15, 8);
3103 if (dsi->debug_read)
3104 DSSDBG("\tDCS short response, 1 byte: %02x\n", data);
3114 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
3115 u16 data = FLD_GET(val, 23, 8);
3116 if (dsi->debug_read)
3117 DSSDBG("\tDCS short response, 2 bytes: %04x\n", data);
3124 buf[0] = data & 0xff;
3125 buf[1] = (data >> 8) & 0xff;
3128 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
3130 int len = FLD_GET(val, 23, 8);
3131 if (dsi->debug_read)
3132 DSSDBG("\tDCS long response, len %d\n", len);
3139 /* two byte checksum ends the packet, not included in len */
3140 for (w = 0; w < len + 2;) {
3142 val = dsi_read_reg(dsidev,
3143 DSI_VC_SHORT_PACKET_HEADER(channel));
3144 if (dsi->debug_read)
3145 DSSDBG("\t\t%02x %02x %02x %02x\n",
3149 (val >> 24) & 0xff);
3151 for (b = 0; b < 4; ++b) {
3153 buf[w] = (val >> (b * 8)) & 0xff;
3154 /* we discard the 2 byte checksum */
3161 DSSERR("\tunknown datatype 0x%02x\n", dt);
3168 DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n",
3173 EXPORT_SYMBOL(dsi_vc_dcs_read);
3175 int dsi_vc_dcs_read_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3180 r = dsi_vc_dcs_read(dssdev, channel, dcs_cmd, data, 1);
3190 EXPORT_SYMBOL(dsi_vc_dcs_read_1);
3192 int dsi_vc_dcs_read_2(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3193 u8 *data1, u8 *data2)
3198 r = dsi_vc_dcs_read(dssdev, channel, dcs_cmd, buf, 2);
3211 EXPORT_SYMBOL(dsi_vc_dcs_read_2);
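/* dsi_vc_set_max_rx_packet_size() below sends the DSI "Set Maximum Return
 * Packet Size" short command (DSI_DT_SET_MAX_RET_PKG_SIZE). Panel drivers
 * would typically raise this limit before asking dsi_vc_dcs_read() for
 * responses longer than the default single byte; whether that is required
 * depends on the peripheral, so treat this as a rule of thumb rather than a
 * guarantee. */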
3213 int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
3216 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3218 return dsi_vc_send_short(dsidev, channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
3221 EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
3223 static int dsi_enter_ulps(struct platform_device *dsidev)
3225 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3226 DECLARE_COMPLETION_ONSTACK(completion);
3231 WARN_ON(!dsi_bus_is_locked(dsidev));
3233 WARN_ON(dsi->ulps_enabled);
3235 if (dsi->ulps_enabled)
3238 if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
3239 DSSERR("DDR_CLK_ALWAYS_ON enabled when entering ULPS\n");
3243 dsi_sync_vc(dsidev, 0);
3244 dsi_sync_vc(dsidev, 1);
3245 dsi_sync_vc(dsidev, 2);
3246 dsi_sync_vc(dsidev, 3);
3248 dsi_force_tx_stop_mode_io(dsidev);
3250 dsi_vc_enable(dsidev, 0, false);
3251 dsi_vc_enable(dsidev, 1, false);
3252 dsi_vc_enable(dsidev, 2, false);
3253 dsi_vc_enable(dsidev, 3, false);
3255 if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
3256 DSSERR("HS busy when enabling ULPS\n");
3260 if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
3261 DSSERR("LP busy when enabling ULPS\n");
3265 r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
3266 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3270 /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
3271 /* LANEx_ULPS_SIG2 */
3272 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (1 << 0) | (1 << 1) | (1 << 2),
3275 if (wait_for_completion_timeout(&completion,
3276 msecs_to_jiffies(1000)) == 0) {
3277 DSSERR("ULPS enable timeout\n");
3282 dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3283 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3285 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
3287 dsi_if_enable(dsidev, false);
3289 dsi->ulps_enabled = true;
3294 dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3295 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3299 static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
3300 unsigned ticks, bool x4, bool x16)
3303 unsigned long total_ticks;
3306 BUG_ON(ticks > 0x1fff);
3308 /* ticks in DSI_FCK */
3309 fck = dsi_fclk_rate(dsidev);
3311 r = dsi_read_reg(dsidev, DSI_TIMING2);
3312 r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
3313 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */
3314 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */
3315 r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
3316 dsi_write_reg(dsidev, DSI_TIMING2, r);
3318 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3320 DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n",
3322 ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3323 (total_ticks * 1000) / (fck / 1000 / 1000));
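/* For a sense of scale (illustrative only): with the 0x1fff, x4, x16 setting
 * programmed from dsi_proto_config() and a DSI functional clock of roughly
 * 170 MHz, total_ticks = 8191 * 64 = 524224, i.e. about 3 ms before LP_RX_TO
 * fires. */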
3326 static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
3330 unsigned long total_ticks;
3333 BUG_ON(ticks > 0x1fff);
3335 /* ticks in DSI_FCK */
3336 fck = dsi_fclk_rate(dsidev);
3338 r = dsi_read_reg(dsidev, DSI_TIMING1);
3339 r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
3340 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */
3341 r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */
3342 r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
3343 dsi_write_reg(dsidev, DSI_TIMING1, r);
3345 total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);
3347 DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n",
3349 ticks, x8 ? " x8" : "", x16 ? " x16" : "",
3350 (total_ticks * 1000) / (fck / 1000 / 1000));
3353 static void dsi_set_stop_state_counter(struct platform_device *dsidev,
3354 unsigned ticks, bool x4, bool x16)
3357 unsigned long total_ticks;
3360 BUG_ON(ticks > 0x1fff);
3362 /* ticks in DSI_FCK */
3363 fck = dsi_fclk_rate(dsidev);
3365 r = dsi_read_reg(dsidev, DSI_TIMING1);
3366 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
3367 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */
3368 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */
3369 r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
3370 dsi_write_reg(dsidev, DSI_TIMING1, r);
3372 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3374 DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n",
3376 ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3377 (total_ticks * 1000) / (fck / 1000 / 1000));
3380 static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
3381 unsigned ticks, bool x4, bool x16)
3384 unsigned long total_ticks;
3387 BUG_ON(ticks > 0x1fff);
3389 /* ticks in TxByteClkHS */
3390 fck = dsi_get_txbyteclkhs(dsidev);
3392 r = dsi_read_reg(dsidev, DSI_TIMING2);
3393 r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
3394 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */
3395 r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */
3396 r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
3397 dsi_write_reg(dsidev, DSI_TIMING2, r);
3399 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3401 DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n",
3403 ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3404 (total_ticks * 1000) / (fck / 1000 / 1000));
3406 static int dsi_proto_config(struct omap_dss_device *dssdev)
3408 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3412 dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
3417 dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
3422 /* XXX what values for the timeouts? */
3423 dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
3424 dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
3425 dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
3426 dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);
3428 switch (dssdev->ctrl.pixel_size) {
3442 r = dsi_read_reg(dsidev, DSI_CTRL);
3443 r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
3444 r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
3445 r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
3446 r = FLD_MOD(r, 1, 4, 4); /* VP_CLK_RATIO, always 1, see errata*/
3447 r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
3448 r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
3449 r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */
3450 r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
3451 r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
3452 if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
3453 r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
3454 /* DCS_CMD_CODE, 1=start, 0=continue */
3455 r = FLD_MOD(r, 0, 25, 25);
3458 dsi_write_reg(dsidev, DSI_CTRL, r);
3460 dsi_vc_initial_config(dsidev, 0);
3461 dsi_vc_initial_config(dsidev, 1);
3462 dsi_vc_initial_config(dsidev, 2);
3463 dsi_vc_initial_config(dsidev, 3);
3468 static void dsi_proto_timings(struct omap_dss_device *dssdev)
3470 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3471 unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
3472 unsigned tclk_pre, tclk_post;
3473 unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
3474 unsigned ths_trail, ths_exit;
3475 unsigned ddr_clk_pre, ddr_clk_post;
3476 unsigned enter_hs_mode_lat, exit_hs_mode_lat;
3480 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
3481 ths_prepare = FLD_GET(r, 31, 24);
3482 ths_prepare_ths_zero = FLD_GET(r, 23, 16);
3483 ths_zero = ths_prepare_ths_zero - ths_prepare;
3484 ths_trail = FLD_GET(r, 15, 8);
3485 ths_exit = FLD_GET(r, 7, 0);
3487 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
3488 tlpx = FLD_GET(r, 22, 16) * 2;
3489 tclk_trail = FLD_GET(r, 15, 8);
3490 tclk_zero = FLD_GET(r, 7, 0);
3492 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
3493 tclk_prepare = FLD_GET(r, 7, 0);
3497 /* min 60ns + 52*UI */
3498 tclk_post = ns2ddr(dsidev, 60) + 26;
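/* One DDR clock period spans 2 UI, so the 52*UI part of the minimum becomes
 * the constant 26 DDR cycles added here; the 60 ns part is converted with
 * ns2ddr(). */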
3500 /* ths_eot is 2 for 2 datalanes and 4 for 1 datalane */
3501 if (dssdev->phy.dsi.data1_lane != 0 &&
3502 dssdev->phy.dsi.data2_lane != 0)
3507 ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
3509 ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;
3511 BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
3512 BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
3514 r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
3515 r = FLD_MOD(r, ddr_clk_pre, 15, 8);
3516 r = FLD_MOD(r, ddr_clk_post, 7, 0);
3517 dsi_write_reg(dsidev, DSI_CLK_TIMING, r);
3519 DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
3523 enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
3524 DIV_ROUND_UP(ths_prepare, 4) +
3525 DIV_ROUND_UP(ths_zero + 3, 4);
3527 exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;
3529 r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
3530 FLD_VAL(exit_hs_mode_lat, 15, 0);
3531 dsi_write_reg(dsidev, DSI_VM_TIMING7, r);
3533 DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
3534 enter_hs_mode_lat, exit_hs_mode_lat);
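/* The helpers below batch pixel bytes for dsi_update_screen_l4(): DSI_PUSH()
 * packs each byte into the next position of a 32-bit word (least-significant
 * byte first, matching dsi_vc_write_long_payload()) and DSI_FLUSH() writes the
 * accumulated word to the long-packet payload register, including any trailing
 * partial word at the end of a packet. */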
3538 #define DSI_DECL_VARS \
3539 int __dsi_cb = 0; u32 __dsi_cv = 0;
3541 #define DSI_FLUSH(dsidev, ch) \
3542 if (__dsi_cb > 0) { \
3543 /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \
3544 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \
3545 __dsi_cb = __dsi_cv = 0; \
3548 #define DSI_PUSH(dsidev, ch, data) \
3550 __dsi_cv |= (data) << (__dsi_cb * 8); \
3551 /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \
3552 if (++__dsi_cb > 3) \
3553 DSI_FLUSH(dsidev, ch); \
3556 static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
3557 int x, int y, int w, int h)
3559 /* Note: supports only 24bit colors in 32bit container */
3560 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3561 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3563 int fifo_stalls = 0;
3564 int max_dsi_packet_size;
3565 int max_data_per_packet;
3566 int max_pixels_per_packet;
3568 int bytespp = dssdev->ctrl.pixel_size / 8;
3574 struct omap_overlay *ovl;
3578 DSSDBG("dsi_update_screen_l4 (%d,%d %dx%d)\n",
3581 ovl = dssdev->manager->overlays[0];
3583 if (ovl->info.color_mode != OMAP_DSS_COLOR_RGB24U)
3586 if (dssdev->ctrl.pixel_size != 24)
3589 scr_width = ovl->info.screen_width;
3590 data = ovl->info.vaddr;
3592 start_offset = scr_width * y + x;
3593 horiz_inc = scr_width - w;
3596 /* We need header(4) + DCSCMD(1) + pixels(numpix*bytespp) bytes in the TX FIFO */
3599 /* When using CPU, max long packet size is TX buffer size */
3600 max_dsi_packet_size = dsi->vc[0].fifo_size * 32 * 4;
3602 /* we seem to get better perf if we divide the tx fifo in half,
3603 and fill one half while the other half is being sent:
3604 max_dsi_packet_size /= 2; */
3606 max_data_per_packet = max_dsi_packet_size - 4 - 1;
3608 max_pixels_per_packet = max_data_per_packet / bytespp;
3610 DSSDBG("max_pixels_per_packet %d\n", max_pixels_per_packet);
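/* Illustrative numbers, assuming the 128-byte (32-word) per-VC TX FIFO
 * allocation set up in dsi_proto_config(): max_dsi_packet_size = 128,
 * max_data_per_packet = 128 - 4 - 1 = 123 and max_pixels_per_packet =
 * 123 / 3 = 41 at 24 bpp. */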
3612 pixels_left = w * h;
3614 DSSDBG("total pixels %d\n", pixels_left);
3616 data += start_offset;
3618 while (pixels_left > 0) {
3619 /* 0x2c = write_memory_start */
3620 /* 0x3c = write_memory_continue */
3621 u8 dcs_cmd = first ? 0x2c : 0x3c;
3627 /* using fifo not empty */
3628 /* TX_FIFO_NOT_EMPTY */
3629 while (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(0)), 5, 5)) {
3631 if (fifo_stalls > 0xfffff) {
3632 DSSERR("fifo stalls overflow, pixels left %d\n",
3634 dsi_if_enable(dsidev, 0);
3640 /* using fifo emptiness */
3641 while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 <
3642 max_dsi_packet_size) {
3644 if (fifo_stalls > 0xfffff) {
3645 DSSERR("fifo stalls overflow, pixels left %d\n",
3647 dsi_if_enable(dsidev, 0);
3652 while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS,
3653 7, 0) + 1) * 4 == 0) {
3655 if (fifo_stalls > 0xfffff) {
3656 DSSERR("fifo stalls overflow, pixels left %d\n",
3658 dsi_if_enable(dsidev, 0);
3663 pixels = min(max_pixels_per_packet, pixels_left);
3665 pixels_left -= pixels;
3667 dsi_vc_write_long_header(dsidev, 0, DSI_DT_DCS_LONG_WRITE,
3668 1 + pixels * bytespp, 0);
3670 DSI_PUSH(dsidev, 0, dcs_cmd);
3672 while (pixels-- > 0) {
3673 u32 pix = __raw_readl(data++);
3675 DSI_PUSH(dsidev, 0, (pix >> 16) & 0xff);
3676 DSI_PUSH(dsidev, 0, (pix >> 8) & 0xff);
3677 DSI_PUSH(dsidev, 0, (pix >> 0) & 0xff);
3680 if (current_x == x+w) {
3686 DSI_FLUSH(dsidev, 0);
3692 static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3693 u16 x, u16 y, u16 w, u16 h)
3695 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3696 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3701 unsigned packet_payload;
3702 unsigned packet_len;
3705 const unsigned channel = dsi->update_channel;
3706 /* line buffer is 1024 x 24bits */
3707 /* XXX: for some reason using full buffer size causes considerable TX
3708 * slowdown with update sizes that fill the whole buffer */
3709 const unsigned line_buf_size = 1023 * 3;
3711 DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
3714 dsi_vc_config_vp(dsidev, channel);
3716 bytespp = dssdev->ctrl.pixel_size / 8;
3717 bytespl = w * bytespp;
3718 bytespf = bytespl * h;
3720 /* NOTE: packet_payload has to be equal to N * bytespl, where N is
3721 * the number of lines in a packet. See errata about VP_CLK_RATIO */
3723 if (bytespf < line_buf_size)
3724 packet_payload = bytespf;
3726 packet_payload = (line_buf_size) / bytespl * bytespl;
3728 packet_len = packet_payload + 1; /* 1 byte for DCS cmd */
3729 total_len = (bytespf / packet_payload) * packet_len;
3731 if (bytespf % packet_payload)
3732 total_len += (bytespf % packet_payload) + 1;
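/* Example with illustrative numbers: at 24 bpp and w = 864, bytespl = 2592,
 * so a multi-line frame gets packet_payload = 3069 / 2592 * 2592 = 2592,
 * i.e. one line of pixels plus the 1-byte DCS command per packet; any
 * partial last chunk is accounted for just above. */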
3734 l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
3735 dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
3737 dsi_vc_write_long_header(dsidev, channel, DSI_DT_DCS_LONG_WRITE,
3740 if (dsi->te_enabled)
3741 l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
3743 l = FLD_MOD(l, 1, 31, 31); /* TE_START */
3744 dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
3746 /* We put SIDLEMODE to no-idle for the duration of the transfer,
3747 * because DSS interrupts are not capable of waking up the CPU and the
3748 * framedone interrupt could be delayed for quite a long time. I think
3749 * the same goes for any DSS interrupts, but for some reason I have not
3750 * seen the problem anywhere other than here. */
3752 dispc_disable_sidle();
3754 dsi_perf_mark_start(dsidev);
3756 r = queue_delayed_work(dsi->workqueue, &dsi->framedone_timeout_work,
3757 msecs_to_jiffies(250));
3760 dss_start_update(dssdev);
3762 if (dsi->te_enabled) {
3763 /* disable LP_RX_TO, so that we can receive TE. Time to wait
3764 * for TE is longer than the timer allows */
3765 REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
3767 dsi_vc_send_bta(dsidev, channel);
3769 #ifdef DSI_CATCH_MISSING_TE
3770 mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
3775 #ifdef DSI_CATCH_MISSING_TE
3776 static void dsi_te_timeout(unsigned long arg)
3778 DSSERR("TE not received for 250ms!\n");
3782 static void dsi_handle_framedone(struct platform_device *dsidev, int error)
3784 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3786 /* SIDLEMODE back to smart-idle */
3787 dispc_enable_sidle();
3789 if (dsi->te_enabled) {
3790 /* enable LP_RX_TO again after the TE */
3791 REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
3794 dsi->framedone_callback(error, dsi->framedone_data);
3797 dsi_perf_show(dsidev, "DISPC");
3800 static void dsi_framedone_timeout_work_callback(struct work_struct *work)
3802 struct dsi_data *dsi = container_of(work, struct dsi_data,
3803 framedone_timeout_work.work);
3804 /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
3805 * 250ms which would conflict with this timeout work. What should be
3806 * done is first cancel the transfer on the HW, and then cancel the
3807 * possibly scheduled framedone work. However, cancelling the transfer
3808 * on the HW is buggy, and would probably require resetting the whole DSI. */
3811 DSSERR("Framedone not received for 250ms!\n");
3813 dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
3816 static void dsi_framedone_irq_callback(void *data, u32 mask)
3818 struct omap_dss_device *dssdev = (struct omap_dss_device *) data;
3819 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3820 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3822 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
3823 * turns itself off. However, DSI still has the pixels in its buffers,
3824 * and is sending the data. */
3827 __cancel_delayed_work(&dsi->framedone_timeout_work);
3829 dsi_handle_framedone(dsidev, 0);
3831 #ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
3832 dispc_fake_vsync_irq();
3836 int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
3837 u16 *x, u16 *y, u16 *w, u16 *h,
3838 bool enlarge_update_area)
3840 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3843 dssdev->driver->get_resolution(dssdev, &dw, &dh);
3845 if (*x > dw || *y > dh)
3857 if (*w == 0 || *h == 0)
3860 dsi_perf_mark_setup(dsidev);
3862 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
3863 dss_setup_partial_planes(dssdev, x, y, w, h,
3864 enlarge_update_area);
3865 dispc_set_lcd_size(dssdev->manager->id, *w, *h);
3870 EXPORT_SYMBOL(omap_dsi_prepare_update);
3872 int omap_dsi_update(struct omap_dss_device *dssdev,
3874 u16 x, u16 y, u16 w, u16 h,
3875 void (*callback)(int, void *), void *data)
3877 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3878 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3880 dsi->update_channel = channel;
3882 /* OMAP DSS cannot send updates of odd widths.
3883 * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON
3884 * here to make sure we catch erroneous updates. Otherwise we'll only
3885 * see a rather obscure HW error happening, as the DSS halts. */
3888 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
3889 dsi->framedone_callback = callback;
3890 dsi->framedone_data = data;
3892 dsi->update_region.x = x;
3893 dsi->update_region.y = y;
3894 dsi->update_region.w = w;
3895 dsi->update_region.h = h;
3896 dsi->update_region.device = dssdev;
3898 dsi_update_screen_dispc(dssdev, x, y, w, h);
3902 r = dsi_update_screen_l4(dssdev, x, y, w, h);
3906 dsi_perf_show(dsidev, "L4");
3912 EXPORT_SYMBOL(omap_dsi_update);
3916 static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
3921 irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
3922 DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
3924 r = omap_dispc_register_isr(dsi_framedone_irq_callback, (void *) dssdev,
3927 DSSERR("can't get FRAMEDONE irq\n");
3931 dispc_set_lcd_display_type(dssdev->manager->id,
3932 OMAP_DSS_LCD_DISPLAY_TFT);
3934 dispc_set_parallel_interface_mode(dssdev->manager->id,
3935 OMAP_DSS_PARALLELMODE_DSI);
3936 dispc_enable_fifohandcheck(dssdev->manager->id, 1);
3938 dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size);
3941 struct omap_video_timings timings = {
3950 dispc_set_lcd_timings(dssdev->manager->id, &timings);
3956 static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
3960 irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
3961 DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
3963 omap_dispc_unregister_isr(dsi_framedone_irq_callback, (void *) dssdev,
3967 static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
3969 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3970 struct dsi_clock_info cinfo;
3973 /* we always use DSS_CLK_SYSCK as input clock */
3974 cinfo.use_sys_clk = true;
3975 cinfo.regn = dssdev->clocks.dsi.regn;
3976 cinfo.regm = dssdev->clocks.dsi.regm;
3977 cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
3978 cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
3979 r = dsi_calc_clock_rates(dssdev, &cinfo);
3981 DSSERR("Failed to calc dsi clocks\n");
3985 r = dsi_pll_set_clock_div(dsidev, &cinfo);
3987 DSSERR("Failed to set dsi clocks\n");
3994 static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
3996 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3997 struct dispc_clock_info dispc_cinfo;
3999 unsigned long long fck;
4001 fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
4003 dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div;
4004 dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div;
4006 r = dispc_calc_clock_rates(fck, &dispc_cinfo);
4008 DSSERR("Failed to calc dispc clocks\n");
4012 r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
4014 DSSERR("Failed to set dispc clocks\n");
4021 static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
4023 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4024 int dsi_module = dsi_get_dsidev_id(dsidev);
4027 r = dsi_pll_init(dsidev, true, true);
4031 r = dsi_configure_dsi_clocks(dssdev);
4035 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
4036 dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src);
4037 dss_select_lcd_clk_source(dssdev->manager->id,
4038 dssdev->clocks.dispc.channel.lcd_clk_src);
4042 r = dsi_configure_dispc_clocks(dssdev);
4046 r = dsi_cio_init(dssdev);
4050 _dsi_print_reset_status(dsidev);
4052 dsi_proto_timings(dssdev);
4053 dsi_set_lp_clk_divisor(dssdev);
4056 _dsi_print_reset_status(dsidev);
4058 r = dsi_proto_config(dssdev);
4062 /* enable interface */
4063 dsi_vc_enable(dsidev, 0, 1);
4064 dsi_vc_enable(dsidev, 1, 1);
4065 dsi_vc_enable(dsidev, 2, 1);
4066 dsi_vc_enable(dsidev, 3, 1);
4067 dsi_if_enable(dsidev, 1);
4068 dsi_force_tx_stop_mode_io(dsidev);
4072 dsi_cio_uninit(dsidev);
4074 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
4075 dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
4077 dsi_pll_uninit(dsidev, true);
4082 static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
4083 bool disconnect_lanes, bool enter_ulps)
4085 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4086 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4087 int dsi_module = dsi_get_dsidev_id(dsidev);
4089 if (enter_ulps && !dsi->ulps_enabled)
4090 dsi_enter_ulps(dsidev);
4092 /* disable interface */
4093 dsi_if_enable(dsidev, 0);
4094 dsi_vc_enable(dsidev, 0, 0);
4095 dsi_vc_enable(dsidev, 1, 0);
4096 dsi_vc_enable(dsidev, 2, 0);
4097 dsi_vc_enable(dsidev, 3, 0);
4099 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
4100 dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
4101 dsi_cio_uninit(dsidev);
4102 dsi_pll_uninit(dsidev, disconnect_lanes);
4105 static int dsi_core_init(struct platform_device *dsidev)
4108 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 0, 0); /* AUTOIDLE */
4111 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 2, 2); /* ENWAKEUP */
4113 /* SIDLEMODE smart-idle */
4114 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 2, 4, 3);
4116 _dsi_initialize_irq(dsidev);
4121 int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
4123 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4124 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4127 DSSDBG("dsi_display_enable\n");
4129 WARN_ON(!dsi_bus_is_locked(dsidev));
4131 mutex_lock(&dsi->lock);
4133 r = omap_dss_start_device(dssdev);
4135 DSSERR("failed to start device\n");
4140 dsi_enable_pll_clock(dsidev, 1);
4142 r = _dsi_reset(dsidev);
4146 dsi_core_init(dsidev);
4148 r = dsi_display_init_dispc(dssdev);
4152 r = dsi_display_init_dsi(dssdev);
4156 mutex_unlock(&dsi->lock);
4161 dsi_display_uninit_dispc(dssdev);
4164 dsi_enable_pll_clock(dsidev, 0);
4165 omap_dss_stop_device(dssdev);
4167 mutex_unlock(&dsi->lock);
4168 DSSDBG("dsi_display_enable FAILED\n");
4171 EXPORT_SYMBOL(omapdss_dsi_display_enable);
4173 void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
4174 bool disconnect_lanes, bool enter_ulps)
4176 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4177 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4179 DSSDBG("dsi_display_disable\n");
4181 WARN_ON(!dsi_bus_is_locked(dsidev));
4183 mutex_lock(&dsi->lock);
4185 dsi_display_uninit_dispc(dssdev);
4187 dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);
4190 dsi_enable_pll_clock(dsidev, 0);
4192 omap_dss_stop_device(dssdev);
4194 mutex_unlock(&dsi->lock);
4196 EXPORT_SYMBOL(omapdss_dsi_display_disable);
4198 int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
4200 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4201 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4203 dsi->te_enabled = enable;
4206 EXPORT_SYMBOL(omapdss_dsi_enable_te);
4208 void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
4209 u32 fifo_size, enum omap_burst_size *burst_size,
4210 u32 *fifo_low, u32 *fifo_high)
4212 unsigned burst_size_bytes;
4214 *burst_size = OMAP_DSS_BURST_16x32;
4215 burst_size_bytes = 16 * 32 / 8;
4217 *fifo_high = fifo_size - burst_size_bytes;
4218 *fifo_low = fifo_size - burst_size_bytes * 2;
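/* With the 16x32-bit burst size, burst_size_bytes = 16 * 32 / 8 = 64, so for
 * a hypothetical 1024-byte FIFO this yields fifo_high = 960 and fifo_low =
 * 896, i.e. one and two burst lengths below the top of the FIFO. */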
4221 int dsi_init_display(struct omap_dss_device *dssdev)
4223 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4224 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4226 DSSDBG("DSI init\n");
4228 /* XXX these should be figured out dynamically */
4229 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
4230 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
4232 if (dsi->vdds_dsi_reg == NULL) {
4233 struct regulator *vdds_dsi;
4235 vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
4237 if (IS_ERR(vdds_dsi)) {
4238 DSSERR("can't get VDDS_DSI regulator\n");
4239 return PTR_ERR(vdds_dsi);
4242 dsi->vdds_dsi_reg = vdds_dsi;
4248 int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
4250 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4251 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4254 for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
4255 if (!dsi->vc[i].dssdev) {
4256 dsi->vc[i].dssdev = dssdev;
4262 DSSERR("cannot get VC for display %s\n", dssdev->name);
4265 EXPORT_SYMBOL(omap_dsi_request_vc);
4267 int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
4269 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4270 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4272 if (vc_id < 0 || vc_id > 3) {
4273 DSSERR("VC ID out of range\n");
4277 if (channel < 0 || channel > 3) {
4278 DSSERR("Virtual Channel out of range\n");
4282 if (dsi->vc[channel].dssdev != dssdev) {
4283 DSSERR("Virtual Channel not allocated to display %s\n",
4288 dsi->vc[channel].vc_id = vc_id;
4292 EXPORT_SYMBOL(omap_dsi_set_vc_id);
4294 void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel)
4296 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4297 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4299 if ((channel >= 0 && channel <= 3) &&
4300 dsi->vc[channel].dssdev == dssdev) {
4301 dsi->vc[channel].dssdev = NULL;
4302 dsi->vc[channel].vc_id = 0;
4305 EXPORT_SYMBOL(omap_dsi_release_vc);
4307 void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
4309 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 7, 1) != 1)
4310 DSSERR("%s (%s) not active\n",
4311 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
4312 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC));
4315 void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
4317 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 8, 1) != 1)
4318 DSSERR("%s (%s) not active\n",
4319 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
4320 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI));
4323 static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
4325 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4327 dsi->regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
4328 dsi->regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM);
4329 dsi->regm_dispc_max =
4330 dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC);
4331 dsi->regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI);
4332 dsi->fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT);
4333 dsi->fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT);
4334 dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
4337 static int dsi_init(struct platform_device *dsidev)
4339 struct omap_display_platform_data *dss_plat_data;
4340 struct omap_dss_board_info *board_info;
4342 int r, i, dsi_module = dsi_get_dsidev_id(dsidev);
4343 struct resource *dsi_mem;
4344 struct dsi_data *dsi;
4346 dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
4353 dsi_pdev_map[dsi_module] = dsidev;
4354 dev_set_drvdata(&dsidev->dev, dsi);
4356 dss_plat_data = dsidev->dev.platform_data;
4357 board_info = dss_plat_data->board_data;
4358 dsi->dsi_mux_pads = board_info->dsi_mux_pads;
4360 spin_lock_init(&dsi->irq_lock);
4361 spin_lock_init(&dsi->errors_lock);
4364 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
4365 spin_lock_init(&dsi->irq_stats_lock);
4366 dsi->irq_stats.last_reset = jiffies;
4369 mutex_init(&dsi->lock);
4370 sema_init(&dsi->bus_lock, 1);
4372 dsi->workqueue = create_singlethread_workqueue(dev_name(&dsidev->dev));
4373 if (dsi->workqueue == NULL) {
4378 INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
4379 dsi_framedone_timeout_work_callback);
4381 #ifdef DSI_CATCH_MISSING_TE
4382 init_timer(&dsi->te_timer);
4383 dsi->te_timer.function = dsi_te_timeout;
4384 dsi->te_timer.data = 0;
4386 dsi_mem = platform_get_resource(dsi->pdev, IORESOURCE_MEM, 0);
4388 DSSERR("can't get IORESOURCE_MEM DSI\n");
4392 dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem));
4394 DSSERR("can't ioremap DSI\n");
4398 dsi->irq = platform_get_irq(dsi->pdev, 0);
4400 DSSERR("platform_get_irq failed\n");
4405 r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED,
4406 dev_name(&dsidev->dev), dsi->pdev);
4408 DSSERR("request_irq failed\n");
4412 /* DSI VCs initialization */
4413 for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
4414 dsi->vc[i].mode = DSI_VC_MODE_L4;
4415 dsi->vc[i].dssdev = NULL;
4416 dsi->vc[i].vc_id = 0;
4419 dsi_calc_clock_param_ranges(dsidev);
4423 rev = dsi_read_reg(dsidev, DSI_REVISION);
4424 dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
4425 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
4433 destroy_workqueue(dsi->workqueue);
4440 static void dsi_exit(struct platform_device *dsidev)
4442 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4444 if (dsi->vdds_dsi_reg != NULL) {
4445 if (dsi->vdds_dsi_enabled) {
4446 regulator_disable(dsi->vdds_dsi_reg);
4447 dsi->vdds_dsi_enabled = false;
4450 regulator_put(dsi->vdds_dsi_reg);
4451 dsi->vdds_dsi_reg = NULL;
4454 free_irq(dsi->irq, dsi->pdev);
4457 destroy_workqueue(dsi->workqueue);
4460 DSSDBG("omap_dsi_exit\n");
4463 /* DSI1 HW IP initialisation */
4464 static int omap_dsi1hw_probe(struct platform_device *dsidev)
4468 r = dsi_init(dsidev);
4470 DSSERR("Failed to initialize DSI\n");
4477 static int omap_dsi1hw_remove(struct platform_device *dsidev)
4479 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4482 WARN_ON(dsi->scp_clk_refcount > 0);
4486 static struct platform_driver omap_dsi1hw_driver = {
4487 .probe = omap_dsi1hw_probe,
4488 .remove = omap_dsi1hw_remove,
4490 .name = "omapdss_dsi1",
4491 .owner = THIS_MODULE,
4495 int dsi_init_platform_driver(void)
4497 return platform_driver_register(&omap_dsi1hw_driver);
4500 void dsi_uninit_platform_driver(void)
4502 platform_driver_unregister(&omap_dsi1hw_driver);