drivers/gpu/ipu-v3/ipu-common.c
/*
 * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
 * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */
#include <linux/module.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_device.h>

#include <drm/drm_fourcc.h>

#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"

static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
{
        return readl(ipu->cm_reg + offset);
}

static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
{
        writel(value, ipu->cm_reg + offset);
}

void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
{
        u32 val;

        val = ipu_cm_read(ipu, IPU_SRM_PRI2);
        val |= 0x8;
        ipu_cm_write(ipu, val, IPU_SRM_PRI2);
}
EXPORT_SYMBOL_GPL(ipu_srm_dp_sync_update);

enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
{
        switch (drm_fourcc) {
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_BGR565:
        case DRM_FORMAT_RGB888:
        case DRM_FORMAT_BGR888:
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_RGBX8888:
        case DRM_FORMAT_BGRX8888:
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_ABGR8888:
        case DRM_FORMAT_RGBA8888:
        case DRM_FORMAT_BGRA8888:
                return IPUV3_COLORSPACE_RGB;
        case DRM_FORMAT_YUYV:
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_YUV420:
        case DRM_FORMAT_YVU420:
                return IPUV3_COLORSPACE_YUV;
        default:
                return IPUV3_COLORSPACE_UNKNOWN;
        }
}
EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);

enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
{
        switch (pixelformat) {
        case V4L2_PIX_FMT_YUV420:
        case V4L2_PIX_FMT_YVU420:
        case V4L2_PIX_FMT_UYVY:
        case V4L2_PIX_FMT_YUYV:
                return IPUV3_COLORSPACE_YUV;
        case V4L2_PIX_FMT_RGB32:
        case V4L2_PIX_FMT_BGR32:
        case V4L2_PIX_FMT_RGB24:
        case V4L2_PIX_FMT_BGR24:
        case V4L2_PIX_FMT_RGB565:
                return IPUV3_COLORSPACE_RGB;
        default:
                return IPUV3_COLORSPACE_UNKNOWN;
        }
}
EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);

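/*
 * ipu_idmac_get - claim exclusive use of an IDMAC channel
 * @ipu:        the IPU instance
 * @num:        channel number (0..63)
 *
 * Returns the channel on success, ERR_PTR(-EBUSY) if it is already in use or
 * ERR_PTR(-ENODEV) for an invalid channel number.  Release it again with
 * ipu_idmac_put().  An illustrative (hypothetical) client sequence:
 *
 *      chan = ipu_idmac_get(ipu, IPUV3_CHANNEL_MEM_BG_SYNC);
 *      if (IS_ERR(chan))
 *              return PTR_ERR(chan);
 *      ... configure CPMEM, enable and use the channel ...
 *      ipu_idmac_put(chan);
 */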
struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
{
        struct ipuv3_channel *channel;

        dev_dbg(ipu->dev, "%s %d\n", __func__, num);

        if (num > 63)
                return ERR_PTR(-ENODEV);

        mutex_lock(&ipu->channel_lock);

        channel = &ipu->channel[num];

        if (channel->busy) {
                channel = ERR_PTR(-EBUSY);
                goto out;
        }

        channel->busy = true;
        channel->num = num;

out:
        mutex_unlock(&ipu->channel_lock);

        return channel;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get);

void ipu_idmac_put(struct ipuv3_channel *channel)
{
        struct ipu_soc *ipu = channel->ipu;

        dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);

        mutex_lock(&ipu->channel_lock);

        channel->busy = false;

        mutex_unlock(&ipu->channel_lock);
}
EXPORT_SYMBOL_GPL(ipu_idmac_put);

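/*
 * Per-channel bits in the CM and IDMAC register files are banked across
 * consecutive 32-bit words: macros such as IPU_CHA_DB_MODE_SEL(ch) select
 * the word for a given channel, while idma_mask() yields the bit within
 * that word (e.g. channel 40 maps to bit 8 of the second word).
 */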
#define idma_mask(ch)                   (1 << (ch & 0x1f))

void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
                bool doublebuffer)
{
        struct ipu_soc *ipu = channel->ipu;
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&ipu->lock, flags);

        reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
        if (doublebuffer)
                reg |= idma_mask(channel->num);
        else
                reg &= ~idma_mask(channel->num);
        ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));

        spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);

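/*
 * Enable the IPU sub-modules selected by @mask in IPU_CONF.  If a display
 * interface is among them, its counter is released in IPU_DISP_GEN as well.
 * ipu_module_disable() below performs the reverse operation.
 */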
int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
{
        unsigned long lock_flags;
        u32 val;

        spin_lock_irqsave(&ipu->lock, lock_flags);

        val = ipu_cm_read(ipu, IPU_DISP_GEN);

        if (mask & IPU_CONF_DI0_EN)
                val |= IPU_DI0_COUNTER_RELEASE;
        if (mask & IPU_CONF_DI1_EN)
                val |= IPU_DI1_COUNTER_RELEASE;

        ipu_cm_write(ipu, val, IPU_DISP_GEN);

        val = ipu_cm_read(ipu, IPU_CONF);
        val |= mask;
        ipu_cm_write(ipu, val, IPU_CONF);

        spin_unlock_irqrestore(&ipu->lock, lock_flags);

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_enable);

int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
{
        unsigned long lock_flags;
        u32 val;

        spin_lock_irqsave(&ipu->lock, lock_flags);

        val = ipu_cm_read(ipu, IPU_CONF);
        val &= ~mask;
        ipu_cm_write(ipu, val, IPU_CONF);

        val = ipu_cm_read(ipu, IPU_DISP_GEN);

        if (mask & IPU_CONF_DI0_EN)
                val &= ~IPU_DI0_COUNTER_RELEASE;
        if (mask & IPU_CONF_DI1_EN)
                val &= ~IPU_DI1_COUNTER_RELEASE;

        ipu_cm_write(ipu, val, IPU_DISP_GEN);

        spin_unlock_irqrestore(&ipu->lock, lock_flags);

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_disable);

int ipu_smfc_enable(struct ipu_soc *ipu)
{
        return ipu_module_enable(ipu, IPU_CONF_SMFC_EN);
}
EXPORT_SYMBOL_GPL(ipu_smfc_enable);

int ipu_smfc_disable(struct ipu_soc *ipu)
{
        return ipu_module_disable(ipu, IPU_CONF_SMFC_EN);
}
EXPORT_SYMBOL_GPL(ipu_smfc_disable);

int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
{
        struct ipu_soc *ipu = channel->ipu;
        unsigned int chno = channel->num;

        return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);

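/*
 * Mark buffer @buf_num (0 or 1) of @channel ready in the corresponding
 * BUFx_RDY register so the IDMAC may transfer it.  With double buffering
 * enabled (see ipu_idmac_set_double_buffer()) the controller alternates
 * between the two buffers.
 */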
void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
        struct ipu_soc *ipu = channel->ipu;
        unsigned int chno = channel->num;
        unsigned long flags;

        spin_lock_irqsave(&ipu->lock, flags);

        /* Mark buffer as ready. */
        if (buf_num == 0)
                ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
        else
                ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));

        spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);

int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
{
        struct ipu_soc *ipu = channel->ipu;
        u32 val;
        unsigned long flags;

        spin_lock_irqsave(&ipu->lock, flags);

        val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
        val |= idma_mask(channel->num);
        ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

        spin_unlock_irqrestore(&ipu->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);

bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
{
        return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);

int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
{
        struct ipu_soc *ipu = channel->ipu;
        unsigned long timeout;

        timeout = jiffies + msecs_to_jiffies(ms);
        while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
                        idma_mask(channel->num)) {
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;
                cpu_relax();
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);

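/*
 * Busy-wait for up to @ms milliseconds until IPU interrupt @irq asserts.
 * @irq is a raw IPU interrupt number (0..IPU_NUM_IRQS-1), not a Linux virq;
 * any stale status is acknowledged before polling starts.
 */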
int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms)
{
        unsigned long timeout;

        timeout = jiffies + msecs_to_jiffies(ms);
        ipu_cm_write(ipu, BIT(irq % 32), IPU_INT_STAT(irq / 32));
        /* Test the bit in the value read back, not in the register offset. */
        while (!(ipu_cm_read(ipu, IPU_INT_STAT(irq / 32)) & BIT(irq % 32))) {
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;
                cpu_relax();
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_wait_interrupt);

int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
{
        struct ipu_soc *ipu = channel->ipu;
        u32 val;
        unsigned long flags;

        spin_lock_irqsave(&ipu->lock, flags);

        /* Disable DMA channel(s) */
        val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
        val &= ~idma_mask(channel->num);
        ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

        /* Set channel buffers NOT to be ready */
        ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */

        if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
                        idma_mask(channel->num)) {
                ipu_cm_write(ipu, idma_mask(channel->num),
                             IPU_CHA_BUF0_RDY(channel->num));
        }

        if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
                        idma_mask(channel->num)) {
                ipu_cm_write(ipu, idma_mask(channel->num),
                             IPU_CHA_BUF1_RDY(channel->num));
        }

        ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */

        /* Reset the double buffer */
        val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
        val &= ~idma_mask(channel->num);
        ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));

        spin_unlock_irqrestore(&ipu->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);

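/*
 * Reset the IPU internal memories: trigger the reset in IPU_MEM_RST and
 * poll the busy bit (bit 31) until the hardware clears it, giving up after
 * one second.
 */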
static int ipu_memory_reset(struct ipu_soc *ipu)
{
        unsigned long timeout;

        ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);

        timeout = jiffies + msecs_to_jiffies(1000);
        while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
                if (time_after(jiffies, timeout))
                        return -ETIME;
                cpu_relax();
        }

        return 0;
}

/*
 * Set the source mux for the given CSI. Selects either parallel or
 * MIPI CSI2 sources.
 */
void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2)
{
        unsigned long flags;
        u32 val, mask;

        mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE :
                IPU_CONF_CSI0_DATA_SOURCE;

        spin_lock_irqsave(&ipu->lock, flags);

        val = ipu_cm_read(ipu, IPU_CONF);
        if (mipi_csi2)
                val |= mask;
        else
                val &= ~mask;
        ipu_cm_write(ipu, val, IPU_CONF);

        spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux);

/*
 * Set the source mux for the IC. Selects either CSI[01] or the VDI.
 */
void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&ipu->lock, flags);

        val = ipu_cm_read(ipu, IPU_CONF);
        if (vdi) {
                val |= IPU_CONF_IC_INPUT;
        } else {
                val &= ~IPU_CONF_IC_INPUT;
                if (csi_id == 1)
                        val |= IPU_CONF_CSI_SEL;
                else
                        val &= ~IPU_CONF_CSI_SEL;
        }
        ipu_cm_write(ipu, val, IPU_CONF);

        spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);

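/*
 * Per-SoC register layout.  All *_ofs values are byte offsets from the
 * IPU's MMIO base address taken from the platform resource; the CM window
 * at cm_ofs additionally hosts sub-blocks such as the IDMAC, DC, DMFC and
 * SMFC registers at the IPU_CM_*_REG_OFS offsets.
 */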
struct ipu_devtype {
        const char *name;
        unsigned long cm_ofs;
        unsigned long cpmem_ofs;
        unsigned long srm_ofs;
        unsigned long tpm_ofs;
        unsigned long csi0_ofs;
        unsigned long csi1_ofs;
        unsigned long ic_ofs;
        unsigned long disp0_ofs;
        unsigned long disp1_ofs;
        unsigned long dc_tmpl_ofs;
        unsigned long vdi_ofs;
        enum ipuv3_type type;
};

static struct ipu_devtype ipu_type_imx51 = {
        .name = "IPUv3EX",
        .cm_ofs = 0x1e000000,
        .cpmem_ofs = 0x1f000000,
        .srm_ofs = 0x1f040000,
        .tpm_ofs = 0x1f060000,
        .csi0_ofs = 0x1f030000,
        .csi1_ofs = 0x1f038000,
        .ic_ofs = 0x1f020000,
        .disp0_ofs = 0x1e040000,
        .disp1_ofs = 0x1e048000,
        .dc_tmpl_ofs = 0x1f080000,
        .vdi_ofs = 0x1e068000,
        .type = IPUV3EX,
};

static struct ipu_devtype ipu_type_imx53 = {
        .name = "IPUv3M",
        .cm_ofs = 0x06000000,
        .cpmem_ofs = 0x07000000,
        .srm_ofs = 0x07040000,
        .tpm_ofs = 0x07060000,
        .csi0_ofs = 0x07030000,
        .csi1_ofs = 0x07038000,
        .ic_ofs = 0x07020000,
        .disp0_ofs = 0x06040000,
        .disp1_ofs = 0x06048000,
        .dc_tmpl_ofs = 0x07080000,
        .vdi_ofs = 0x06068000,
        .type = IPUV3M,
};

static struct ipu_devtype ipu_type_imx6q = {
        .name = "IPUv3H",
        .cm_ofs = 0x00200000,
        .cpmem_ofs = 0x00300000,
        .srm_ofs = 0x00340000,
        .tpm_ofs = 0x00360000,
        .csi0_ofs = 0x00230000,
        .csi1_ofs = 0x00238000,
        .ic_ofs = 0x00220000,
        .disp0_ofs = 0x00240000,
        .disp1_ofs = 0x00248000,
        .dc_tmpl_ofs = 0x00380000,
        .vdi_ofs = 0x00268000,
        .type = IPUV3H,
};

static const struct of_device_id imx_ipu_dt_ids[] = {
        { .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
        { .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
        { .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);

static int ipu_submodules_init(struct ipu_soc *ipu,
                struct platform_device *pdev, unsigned long ipu_base,
                struct clk *ipu_clk)
{
        char *unit;
        int ret;
        struct device *dev = &pdev->dev;
        const struct ipu_devtype *devtype = ipu->devtype;

        ret = ipu_cpmem_init(ipu, dev, ipu_base + devtype->cpmem_ofs);
        if (ret) {
                unit = "cpmem";
                goto err_cpmem;
        }

        ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs,
                           IPU_CONF_CSI0_EN, ipu_clk);
        if (ret) {
                unit = "csi0";
                goto err_csi_0;
        }

        ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs,
                           IPU_CONF_CSI1_EN, ipu_clk);
        if (ret) {
                unit = "csi1";
                goto err_csi_1;
        }

        ret = ipu_ic_init(ipu, dev,
                          ipu_base + devtype->ic_ofs,
                          ipu_base + devtype->tpm_ofs);
        if (ret) {
                unit = "ic";
                goto err_ic;
        }

        ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
                          IPU_CONF_DI0_EN, ipu_clk);
        if (ret) {
                unit = "di0";
                goto err_di_0;
        }

        ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
                        IPU_CONF_DI1_EN, ipu_clk);
        if (ret) {
                unit = "di1";
                goto err_di_1;
        }

        ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
                        IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
        if (ret) {
                unit = "dc_template";
                goto err_dc;
        }

        ret = ipu_dmfc_init(ipu, dev, ipu_base +
                        devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
        if (ret) {
                unit = "dmfc";
                goto err_dmfc;
        }

        ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
        if (ret) {
                unit = "dp";
                goto err_dp;
        }

        ret = ipu_smfc_init(ipu, dev, ipu_base +
                        devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
        if (ret) {
                unit = "smfc";
                goto err_smfc;
        }

        return 0;

err_smfc:
        ipu_dp_exit(ipu);
err_dp:
        ipu_dmfc_exit(ipu);
err_dmfc:
        ipu_dc_exit(ipu);
err_dc:
        ipu_di_exit(ipu, 1);
err_di_1:
        ipu_di_exit(ipu, 0);
err_di_0:
        ipu_ic_exit(ipu);
err_ic:
        ipu_csi_exit(ipu, 1);
err_csi_1:
        ipu_csi_exit(ipu, 0);
err_csi_0:
        ipu_cpmem_exit(ipu);
err_cpmem:
        dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
        return ret;
}

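/*
 * Demultiplex the IPU status registers listed in @regs: for every interrupt
 * that is both pending and enabled, look up the Linux virq in the per-IPU
 * irqdomain and invoke its handler.
 */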
static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
{
        unsigned long status;
        int i, bit, irq;

        for (i = 0; i < num_regs; i++) {

                status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
                status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));

                for_each_set_bit(bit, &status, 32) {
                        irq = irq_linear_revmap(ipu->domain,
                                                regs[i] * 32 + bit);
                        if (irq)
                                generic_handle_irq(irq);
                }
        }
}

static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
{
        struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
        const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
        struct irq_chip *chip = irq_get_chip(irq);

        chained_irq_enter(chip, desc);

        ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

        chained_irq_exit(chip, desc);
}

static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
{
        struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
        const int int_reg[] = { 4, 5, 8, 9};
        struct irq_chip *chip = irq_get_chip(irq);

        chained_irq_enter(chip, desc);

        ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

        chained_irq_exit(chip, desc);
}

int ipu_map_irq(struct ipu_soc *ipu, int irq)
{
        int virq;

        virq = irq_linear_revmap(ipu->domain, irq);
        if (!virq)
                virq = irq_create_mapping(ipu->domain, irq);

        return virq;
}
EXPORT_SYMBOL_GPL(ipu_map_irq);

int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
                enum ipu_channel_irq irq_type)
{
        return ipu_map_irq(ipu, irq_type + channel->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);

static void ipu_submodules_exit(struct ipu_soc *ipu)
{
        ipu_smfc_exit(ipu);
        ipu_dp_exit(ipu);
        ipu_dmfc_exit(ipu);
        ipu_dc_exit(ipu);
        ipu_di_exit(ipu, 1);
        ipu_di_exit(ipu, 0);
        ipu_ic_exit(ipu);
        ipu_csi_exit(ipu, 1);
        ipu_csi_exit(ipu, 0);
        ipu_cpmem_exit(ipu);
}

static int platform_remove_devices_fn(struct device *dev, void *unused)
{
        struct platform_device *pdev = to_platform_device(dev);

        platform_device_unregister(pdev);

        return 0;
}

static void platform_device_unregister_children(struct platform_device *pdev)
{
        device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
}

struct ipu_platform_reg {
        struct ipu_client_platformdata pdata;
        const char *name;
        int reg_offset;
};

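/*
 * Child platform devices registered by ipu_add_client_devices(): two DRM
 * CRTCs (DI0 driven through the DP background/foreground flows, DI1 through
 * the DC only) and two capture interfaces (CSI0/CSI1).  Entries with a
 * reg_offset also get a memory resource carved out of the CM register
 * window.
 */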
static const struct ipu_platform_reg client_reg[] = {
        {
                .pdata = {
                        .di = 0,
                        .dc = 5,
                        .dp = IPU_DP_FLOW_SYNC_BG,
                        .dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
                        .dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
                },
                .name = "imx-ipuv3-crtc",
        }, {
                .pdata = {
                        .di = 1,
                        .dc = 1,
                        .dp = -EINVAL,
                        .dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
                        .dma[1] = -EINVAL,
                },
                .name = "imx-ipuv3-crtc",
        }, {
                .pdata = {
                        .csi = 0,
                        .dma[0] = IPUV3_CHANNEL_CSI0,
                        .dma[1] = -EINVAL,
                },
                .reg_offset = IPU_CM_CSI0_REG_OFS,
                .name = "imx-ipuv3-camera",
        }, {
                .pdata = {
                        .csi = 1,
                        .dma[0] = IPUV3_CHANNEL_CSI1,
                        .dma[1] = -EINVAL,
                },
                .reg_offset = IPU_CM_CSI1_REG_OFS,
                .name = "imx-ipuv3-camera",
        },
};

static DEFINE_MUTEX(ipu_client_id_mutex);
static int ipu_client_id;

static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
{
        struct device *dev = ipu->dev;
        unsigned i;
        int id, ret;

        mutex_lock(&ipu_client_id_mutex);
        id = ipu_client_id;
        ipu_client_id += ARRAY_SIZE(client_reg);
        mutex_unlock(&ipu_client_id_mutex);

        for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
                const struct ipu_platform_reg *reg = &client_reg[i];
                struct platform_device *pdev;
                struct resource res;

                if (reg->reg_offset) {
                        memset(&res, 0, sizeof(res));
                        res.flags = IORESOURCE_MEM;
                        res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset;
                        res.end = res.start + PAGE_SIZE - 1;
                        pdev = platform_device_register_resndata(dev, reg->name,
                                id++, &res, 1, &reg->pdata, sizeof(reg->pdata));
                } else {
                        pdev = platform_device_register_data(dev, reg->name,
                                id++, &reg->pdata, sizeof(reg->pdata));
                }

                if (IS_ERR(pdev)) {
                        ret = PTR_ERR(pdev);
                        goto err_register;
                }
        }

        return 0;

err_register:
        platform_device_unregister_children(to_platform_device(dev));

        return ret;
}

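/*
 * Register a linear irqdomain with one generic irq chip per bank of 32 IPU
 * interrupts, using IPU_INT_STAT as the ack register and IPU_INT_CTRL as
 * the mask register, then chain the sync and error parent interrupts into
 * it.  The "unused" bitmaps flag per-bank interrupt bits that are never
 * used and are therefore excluded from the generic chips.
 */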
static int ipu_irq_init(struct ipu_soc *ipu)
{
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;
        unsigned long unused[IPU_NUM_IRQS / 32] = {
                0x400100d0, 0xffe000fd,
                0x400100d0, 0xffe000fd,
                0x400100d0, 0xffe000fd,
                0x4077ffff, 0xffe7e1fd,
                0x23fffffe, 0x8880fff0,
                0xf98fe7d0, 0xfff81fff,
                0x400100d0, 0xffe000fd,
                0x00000000,
        };
        int ret, i;

        ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
                                            &irq_generic_chip_ops, ipu);
        if (!ipu->domain) {
                dev_err(ipu->dev, "failed to add irq domain\n");
                return -ENODEV;
        }

        ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
                                             handle_level_irq, 0,
                                             IRQF_VALID, 0);
        if (ret < 0) {
                dev_err(ipu->dev, "failed to alloc generic irq chips\n");
                irq_domain_remove(ipu->domain);
                return ret;
        }

        for (i = 0; i < IPU_NUM_IRQS; i += 32) {
                gc = irq_get_domain_generic_chip(ipu->domain, i);
                gc->reg_base = ipu->cm_reg;
                gc->unused = unused[i / 32];
                ct = gc->chip_types;
                ct->chip.irq_ack = irq_gc_ack_set_bit;
                ct->chip.irq_mask = irq_gc_mask_clr_bit;
                ct->chip.irq_unmask = irq_gc_mask_set_bit;
                ct->regs.ack = IPU_INT_STAT(i / 32);
                ct->regs.mask = IPU_INT_CTRL(i / 32);
        }

        irq_set_chained_handler(ipu->irq_sync, ipu_irq_handler);
        irq_set_handler_data(ipu->irq_sync, ipu);
        irq_set_chained_handler(ipu->irq_err, ipu_err_irq_handler);
        irq_set_handler_data(ipu->irq_err, ipu);

        return 0;
}

static void ipu_irq_exit(struct ipu_soc *ipu)
{
        int i, irq;

        irq_set_chained_handler(ipu->irq_err, NULL);
        irq_set_handler_data(ipu->irq_err, NULL);
        irq_set_chained_handler(ipu->irq_sync, NULL);
        irq_set_handler_data(ipu->irq_sync, NULL);

        /* TODO: remove irq_domain_generic_chips */

        for (i = 0; i < IPU_NUM_IRQS; i++) {
                irq = irq_linear_revmap(ipu->domain, i);
                if (irq)
                        irq_dispose_mapping(irq);
        }

        irq_domain_remove(ipu->domain);
}

static int ipu_probe(struct platform_device *pdev)
{
        const struct of_device_id *of_id =
                        of_match_device(imx_ipu_dt_ids, &pdev->dev);
        struct ipu_soc *ipu;
        struct resource *res;
        unsigned long ipu_base;
        int i, ret, irq_sync, irq_err;
        const struct ipu_devtype *devtype;

        devtype = of_id->data;

        irq_sync = platform_get_irq(pdev, 0);
        irq_err = platform_get_irq(pdev, 1);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
                        irq_sync, irq_err);

        if (!res || irq_sync < 0 || irq_err < 0)
                return -ENODEV;

        ipu_base = res->start;

        ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
        if (!ipu)
                return -ENOMEM;

        for (i = 0; i < 64; i++)
                ipu->channel[i].ipu = ipu;
        ipu->devtype = devtype;
        ipu->ipu_type = devtype->type;

        spin_lock_init(&ipu->lock);
        mutex_init(&ipu->channel_lock);

        dev_dbg(&pdev->dev, "cm_reg:   0x%08lx\n",
                        ipu_base + devtype->cm_ofs);
        dev_dbg(&pdev->dev, "idmac:    0x%08lx\n",
                        ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
        dev_dbg(&pdev->dev, "cpmem:    0x%08lx\n",
                        ipu_base + devtype->cpmem_ofs);
        dev_dbg(&pdev->dev, "csi0:    0x%08lx\n",
                        ipu_base + devtype->csi0_ofs);
        dev_dbg(&pdev->dev, "csi1:    0x%08lx\n",
                        ipu_base + devtype->csi1_ofs);
        dev_dbg(&pdev->dev, "ic:      0x%08lx\n",
                        ipu_base + devtype->ic_ofs);
        dev_dbg(&pdev->dev, "disp0:    0x%08lx\n",
                        ipu_base + devtype->disp0_ofs);
        dev_dbg(&pdev->dev, "disp1:    0x%08lx\n",
                        ipu_base + devtype->disp1_ofs);
        dev_dbg(&pdev->dev, "srm:      0x%08lx\n",
                        ipu_base + devtype->srm_ofs);
        dev_dbg(&pdev->dev, "tpm:      0x%08lx\n",
                        ipu_base + devtype->tpm_ofs);
        dev_dbg(&pdev->dev, "dc:       0x%08lx\n",
                        ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
        dev_dbg(&pdev->dev, "ic:       0x%08lx\n",
                        ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
        dev_dbg(&pdev->dev, "dmfc:     0x%08lx\n",
                        ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
        dev_dbg(&pdev->dev, "vdi:      0x%08lx\n",
                        ipu_base + devtype->vdi_ofs);

        ipu->cm_reg = devm_ioremap(&pdev->dev,
                        ipu_base + devtype->cm_ofs, PAGE_SIZE);
        ipu->idmac_reg = devm_ioremap(&pdev->dev,
                        ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
                        PAGE_SIZE);

        if (!ipu->cm_reg || !ipu->idmac_reg)
                return -ENOMEM;

        ipu->clk = devm_clk_get(&pdev->dev, "bus");
        if (IS_ERR(ipu->clk)) {
                ret = PTR_ERR(ipu->clk);
                dev_err(&pdev->dev, "clk_get failed with %d\n", ret);
                return ret;
        }

        platform_set_drvdata(pdev, ipu);

        ret = clk_prepare_enable(ipu->clk);
        if (ret) {
                dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }

        ipu->dev = &pdev->dev;
        ipu->irq_sync = irq_sync;
        ipu->irq_err = irq_err;

        ret = ipu_irq_init(ipu);
        if (ret)
                goto out_failed_irq;

        ret = device_reset(&pdev->dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to reset: %d\n", ret);
                goto out_failed_reset;
        }
        ret = ipu_memory_reset(ipu);
        if (ret)
                goto out_failed_reset;

        /* Set MCU_T to divide MCU access window into 2 */
        ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
                        IPU_DISP_GEN);

        ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
        if (ret)
                goto failed_submodules_init;

        ret = ipu_add_client_devices(ipu, ipu_base);
        if (ret) {
                dev_err(&pdev->dev, "adding client devices failed with %d\n",
                                ret);
                goto failed_add_clients;
        }

        dev_info(&pdev->dev, "%s probed\n", devtype->name);

        return 0;

failed_add_clients:
        ipu_submodules_exit(ipu);
failed_submodules_init:
out_failed_reset:
        ipu_irq_exit(ipu);
out_failed_irq:
        clk_disable_unprepare(ipu->clk);
        return ret;
}

static int ipu_remove(struct platform_device *pdev)
{
        struct ipu_soc *ipu = platform_get_drvdata(pdev);

        platform_device_unregister_children(pdev);
        ipu_submodules_exit(ipu);
        ipu_irq_exit(ipu);

        clk_disable_unprepare(ipu->clk);

        return 0;
}

static struct platform_driver imx_ipu_driver = {
        .driver = {
                .name = "imx-ipuv3",
                .of_match_table = imx_ipu_dt_ids,
        },
        .probe = ipu_probe,
        .remove = ipu_remove,
};

module_platform_driver(imx_ipu_driver);

MODULE_ALIAS("platform:imx-ipuv3");
MODULE_DESCRIPTION("i.MX IPU v3 driver");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_LICENSE("GPL");