gpu: ipu-v3: smfc: Move enable/disable to ipu-smfc.c
drivers/gpu/ipu-v3/ipu-common.c (firefly-linux-kernel-4.4.55.git)
/*
 * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
 * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */
#include <linux/module.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_device.h>

#include <drm/drm_fourcc.h>

#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"

static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
{
        return readl(ipu->cm_reg + offset);
}

static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
{
        writel(value, ipu->cm_reg + offset);
}

void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
{
        u32 val;

        val = ipu_cm_read(ipu, IPU_SRM_PRI2);
        val |= 0x8;
        ipu_cm_write(ipu, val, IPU_SRM_PRI2);
}
EXPORT_SYMBOL_GPL(ipu_srm_dp_sync_update);

enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
{
        switch (drm_fourcc) {
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_BGR565:
        case DRM_FORMAT_RGB888:
        case DRM_FORMAT_BGR888:
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_RGBX8888:
        case DRM_FORMAT_BGRX8888:
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_ABGR8888:
        case DRM_FORMAT_RGBA8888:
        case DRM_FORMAT_BGRA8888:
                return IPUV3_COLORSPACE_RGB;
        case DRM_FORMAT_YUYV:
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_YUV420:
        case DRM_FORMAT_YVU420:
                return IPUV3_COLORSPACE_YUV;
        default:
                return IPUV3_COLORSPACE_UNKNOWN;
        }
}
EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);

enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
{
        switch (pixelformat) {
        case V4L2_PIX_FMT_YUV420:
        case V4L2_PIX_FMT_YVU420:
        case V4L2_PIX_FMT_UYVY:
        case V4L2_PIX_FMT_YUYV:
                return IPUV3_COLORSPACE_YUV;
        case V4L2_PIX_FMT_RGB32:
        case V4L2_PIX_FMT_BGR32:
        case V4L2_PIX_FMT_RGB24:
        case V4L2_PIX_FMT_BGR24:
        case V4L2_PIX_FMT_RGB565:
                return IPUV3_COLORSPACE_RGB;
        default:
                return IPUV3_COLORSPACE_UNKNOWN;
        }
}
EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);

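/*
 * Illustrative sketch, not part of the original driver: a capture or display
 * helper could combine the two lookups above to decide whether a colorspace
 * conversion step is needed between its input and output formats. The
 * function name is hypothetical.
 */
static bool __maybe_unused ipu_example_needs_csc(u32 v4l2_in, u32 drm_out)
{
        enum ipu_color_space in = ipu_pixelformat_to_colorspace(v4l2_in);
        enum ipu_color_space out = ipu_drm_fourcc_to_colorspace(drm_out);

        /* Treat unknown formats conservatively as "conversion needed". */
        if (in == IPUV3_COLORSPACE_UNKNOWN || out == IPUV3_COLORSPACE_UNKNOWN)
                return true;

        return in != out;
}
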
struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
{
        struct ipuv3_channel *channel;

        dev_dbg(ipu->dev, "%s %d\n", __func__, num);

        if (num > 63)
                return ERR_PTR(-ENODEV);

        mutex_lock(&ipu->channel_lock);

        channel = &ipu->channel[num];

        if (channel->busy) {
                channel = ERR_PTR(-EBUSY);
                goto out;
        }

        channel->busy = true;
        channel->num = num;

out:
        mutex_unlock(&ipu->channel_lock);

        return channel;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get);

void ipu_idmac_put(struct ipuv3_channel *channel)
{
        struct ipu_soc *ipu = channel->ipu;

        dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);

        mutex_lock(&ipu->channel_lock);

        channel->busy = false;

        mutex_unlock(&ipu->channel_lock);
}
EXPORT_SYMBOL_GPL(ipu_idmac_put);

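/*
 * Illustrative sketch, not part of the original driver: the usual
 * acquire/use/release pattern for an IDMAC channel. IPUV3_CHANNEL_MEM_BG_SYNC
 * is just an example; clients pick the IPUV3_CHANNEL_* constant that matches
 * their data path.
 */
static int __maybe_unused ipu_example_use_channel(struct ipu_soc *ipu)
{
        struct ipuv3_channel *ch;

        ch = ipu_idmac_get(ipu, IPUV3_CHANNEL_MEM_BG_SYNC);
        if (IS_ERR(ch))
                return PTR_ERR(ch);     /* -ENODEV or -EBUSY */

        /* ... program CPMEM, enable the channel, queue buffers ... */

        ipu_idmac_put(ch);

        return 0;
}
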
#define idma_mask(ch)                   (1 << ((ch) & 0x1f))

void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
                bool doublebuffer)
{
        struct ipu_soc *ipu = channel->ipu;
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&ipu->lock, flags);

        reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
        if (doublebuffer)
                reg |= idma_mask(channel->num);
        else
                reg &= ~idma_mask(channel->num);
        ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));

        spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);

int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
{
        unsigned long lock_flags;
        u32 val;

        spin_lock_irqsave(&ipu->lock, lock_flags);

        val = ipu_cm_read(ipu, IPU_DISP_GEN);

        if (mask & IPU_CONF_DI0_EN)
                val |= IPU_DI0_COUNTER_RELEASE;
        if (mask & IPU_CONF_DI1_EN)
                val |= IPU_DI1_COUNTER_RELEASE;

        ipu_cm_write(ipu, val, IPU_DISP_GEN);

        val = ipu_cm_read(ipu, IPU_CONF);
        val |= mask;
        ipu_cm_write(ipu, val, IPU_CONF);

        spin_unlock_irqrestore(&ipu->lock, lock_flags);

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_enable);

int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
{
        unsigned long lock_flags;
        u32 val;

        spin_lock_irqsave(&ipu->lock, lock_flags);

        val = ipu_cm_read(ipu, IPU_CONF);
        val &= ~mask;
        ipu_cm_write(ipu, val, IPU_CONF);

        val = ipu_cm_read(ipu, IPU_DISP_GEN);

        if (mask & IPU_CONF_DI0_EN)
                val &= ~IPU_DI0_COUNTER_RELEASE;
        if (mask & IPU_CONF_DI1_EN)
                val &= ~IPU_DI1_COUNTER_RELEASE;

        ipu_cm_write(ipu, val, IPU_DISP_GEN);

        spin_unlock_irqrestore(&ipu->lock, lock_flags);

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_disable);

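/*
 * Illustrative sketch, not part of the original driver: sub-modules are gated
 * as a group through IPU_CONF, and the enable/disable calls above are meant
 * to be used symmetrically. The mask below reuses IPU_CONF_DI0_EN, which is
 * already referenced in this file; real clients usually OR in further
 * IPU_CONF_*_EN bits for their whole pipeline.
 */
static void __maybe_unused ipu_example_toggle_di0(struct ipu_soc *ipu)
{
        ipu_module_enable(ipu, IPU_CONF_DI0_EN);
        /* ... scan out frames ... */
        ipu_module_disable(ipu, IPU_CONF_DI0_EN);
}
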
int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
{
        struct ipu_soc *ipu = channel->ipu;
        unsigned int chno = channel->num;

        return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);

void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
        struct ipu_soc *ipu = channel->ipu;
        unsigned int chno = channel->num;
        unsigned long flags;

        spin_lock_irqsave(&ipu->lock, flags);

        /* Mark buffer as ready. */
        if (buf_num == 0)
                ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
        else
                ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));

        spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);

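/*
 * Illustrative sketch, not part of the original driver: double-buffered
 * ping-pong operation. Both buffers are primed once, then the side the IDMAC
 * is *not* currently fetching from is refilled and marked ready again.
 */
static void __maybe_unused ipu_example_ping_pong(struct ipuv3_channel *ch)
{
        int next;

        ipu_idmac_set_double_buffer(ch, true);

        ipu_idmac_select_buffer(ch, 0);
        ipu_idmac_select_buffer(ch, 1);

        next = !ipu_idmac_get_current_buffer(ch);
        /* ... refill buffer 'next', then hand it back to the hardware ... */
        ipu_idmac_select_buffer(ch, next);
}
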
int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
{
        struct ipu_soc *ipu = channel->ipu;
        u32 val;
        unsigned long flags;

        spin_lock_irqsave(&ipu->lock, flags);

        val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
        val |= idma_mask(channel->num);
        ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

        spin_unlock_irqrestore(&ipu->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);

bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
{
        return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);

int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
{
        struct ipu_soc *ipu = channel->ipu;
        unsigned long timeout;

        timeout = jiffies + msecs_to_jiffies(ms);
        while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
                        idma_mask(channel->num)) {
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;
                cpu_relax();
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);

int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms)
{
        unsigned long timeout;

        timeout = jiffies + msecs_to_jiffies(ms);
        ipu_cm_write(ipu, BIT(irq % 32), IPU_INT_STAT(irq / 32));
        while (!(ipu_cm_read(ipu, IPU_INT_STAT(irq / 32)) & BIT(irq % 32))) {
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;
                cpu_relax();
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_wait_interrupt);

int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
{
        struct ipu_soc *ipu = channel->ipu;
        u32 val;
        unsigned long flags;

        spin_lock_irqsave(&ipu->lock, flags);

        /* Disable DMA channel(s) */
        val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
        val &= ~idma_mask(channel->num);
        ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

        /* Set channel buffers NOT to be ready */
        ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */

        if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
                        idma_mask(channel->num)) {
                ipu_cm_write(ipu, idma_mask(channel->num),
                             IPU_CHA_BUF0_RDY(channel->num));
        }

        if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
                        idma_mask(channel->num)) {
                ipu_cm_write(ipu, idma_mask(channel->num),
                             IPU_CHA_BUF1_RDY(channel->num));
        }

        ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */

        /* Reset the double buffer */
        val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
        val &= ~idma_mask(channel->num);
        ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));

        spin_unlock_irqrestore(&ipu->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);

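/*
 * Illustrative sketch, not part of the original driver: a typical teardown
 * order for a running channel - wait for the IDMAC to go idle (the 50 ms
 * timeout is an arbitrary example), then disable the channel and drop the
 * reference.
 */
static void __maybe_unused ipu_example_stop_channel(struct ipuv3_channel *ch)
{
        if (ipu_idmac_wait_busy(ch, 50))
                dev_warn(ch->ipu->dev, "channel %d still busy, stopping anyway\n",
                         ch->num);

        ipu_idmac_disable_channel(ch);
        ipu_idmac_put(ch);
}
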
static int ipu_memory_reset(struct ipu_soc *ipu)
{
        unsigned long timeout;

        ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);

        timeout = jiffies + msecs_to_jiffies(1000);
        while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
                if (time_after(jiffies, timeout))
                        return -ETIME;
                cpu_relax();
        }

        return 0;
}

/*
 * Set the source mux for the given CSI. Selects either parallel or
 * MIPI CSI2 sources.
 */
void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2)
{
        unsigned long flags;
        u32 val, mask;

        mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE :
                IPU_CONF_CSI0_DATA_SOURCE;

        spin_lock_irqsave(&ipu->lock, flags);

        val = ipu_cm_read(ipu, IPU_CONF);
        if (mipi_csi2)
                val |= mask;
        else
                val &= ~mask;
        ipu_cm_write(ipu, val, IPU_CONF);

        spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux);

/*
 * Set the source mux for the IC. Selects either CSI[01] or the VDI.
 */
void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&ipu->lock, flags);

        val = ipu_cm_read(ipu, IPU_CONF);
        if (vdi) {
                val |= IPU_CONF_IC_INPUT;
        } else {
                val &= ~IPU_CONF_IC_INPUT;
                if (csi_id == 1)
                        val |= IPU_CONF_CSI_SEL;
                else
                        val &= ~IPU_CONF_CSI_SEL;
        }
        ipu_cm_write(ipu, val, IPU_CONF);

        spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);

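/*
 * Illustrative sketch, not part of the original driver: routing a MIPI CSI-2
 * sensor on CSI1 into the IC (rather than the VDI) simply means setting both
 * muxes before the capture path is enabled.
 */
static void __maybe_unused ipu_example_route_csi1_to_ic(struct ipu_soc *ipu)
{
        ipu_set_csi_src_mux(ipu, 1, true);      /* CSI1 data comes from MIPI CSI-2 */
        ipu_set_ic_src_mux(ipu, 1, false);      /* IC input comes from CSI1, not VDI */
}
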
struct ipu_devtype {
        const char *name;
        unsigned long cm_ofs;
        unsigned long cpmem_ofs;
        unsigned long srm_ofs;
        unsigned long tpm_ofs;
        unsigned long csi0_ofs;
        unsigned long csi1_ofs;
        unsigned long ic_ofs;
        unsigned long disp0_ofs;
        unsigned long disp1_ofs;
        unsigned long dc_tmpl_ofs;
        unsigned long vdi_ofs;
        enum ipuv3_type type;
};

static struct ipu_devtype ipu_type_imx51 = {
        .name = "IPUv3EX",
        .cm_ofs = 0x1e000000,
        .cpmem_ofs = 0x1f000000,
        .srm_ofs = 0x1f040000,
        .tpm_ofs = 0x1f060000,
        .csi0_ofs = 0x1f030000,
        .csi1_ofs = 0x1f038000,
        .ic_ofs = 0x1f020000,
        .disp0_ofs = 0x1e040000,
        .disp1_ofs = 0x1e048000,
        .dc_tmpl_ofs = 0x1f080000,
        .vdi_ofs = 0x1e068000,
        .type = IPUV3EX,
};

static struct ipu_devtype ipu_type_imx53 = {
        .name = "IPUv3M",
        .cm_ofs = 0x06000000,
        .cpmem_ofs = 0x07000000,
        .srm_ofs = 0x07040000,
        .tpm_ofs = 0x07060000,
        .csi0_ofs = 0x07030000,
        .csi1_ofs = 0x07038000,
        .ic_ofs = 0x07020000,
        .disp0_ofs = 0x06040000,
        .disp1_ofs = 0x06048000,
        .dc_tmpl_ofs = 0x07080000,
        .vdi_ofs = 0x06068000,
        .type = IPUV3M,
};

static struct ipu_devtype ipu_type_imx6q = {
        .name = "IPUv3H",
        .cm_ofs = 0x00200000,
        .cpmem_ofs = 0x00300000,
        .srm_ofs = 0x00340000,
        .tpm_ofs = 0x00360000,
        .csi0_ofs = 0x00230000,
        .csi1_ofs = 0x00238000,
        .ic_ofs = 0x00220000,
        .disp0_ofs = 0x00240000,
        .disp1_ofs = 0x00248000,
        .dc_tmpl_ofs = 0x00380000,
        .vdi_ofs = 0x00268000,
        .type = IPUV3H,
};

static const struct of_device_id imx_ipu_dt_ids[] = {
        { .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
        { .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
        { .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);

static int ipu_submodules_init(struct ipu_soc *ipu,
                struct platform_device *pdev, unsigned long ipu_base,
                struct clk *ipu_clk)
{
        char *unit;
        int ret;
        struct device *dev = &pdev->dev;
        const struct ipu_devtype *devtype = ipu->devtype;

        ret = ipu_cpmem_init(ipu, dev, ipu_base + devtype->cpmem_ofs);
        if (ret) {
                unit = "cpmem";
                goto err_cpmem;
        }

        ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs,
                           IPU_CONF_CSI0_EN, ipu_clk);
        if (ret) {
                unit = "csi0";
                goto err_csi_0;
        }

        ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs,
                           IPU_CONF_CSI1_EN, ipu_clk);
        if (ret) {
                unit = "csi1";
                goto err_csi_1;
        }

        ret = ipu_ic_init(ipu, dev,
                          ipu_base + devtype->ic_ofs,
                          ipu_base + devtype->tpm_ofs);
        if (ret) {
                unit = "ic";
                goto err_ic;
        }

        ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
                          IPU_CONF_DI0_EN, ipu_clk);
        if (ret) {
                unit = "di0";
                goto err_di_0;
        }

        ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
                        IPU_CONF_DI1_EN, ipu_clk);
        if (ret) {
                unit = "di1";
                goto err_di_1;
        }

        ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
                        IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
        if (ret) {
                unit = "dc_template";
                goto err_dc;
        }

        ret = ipu_dmfc_init(ipu, dev, ipu_base +
                        devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
        if (ret) {
                unit = "dmfc";
                goto err_dmfc;
        }

        ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
        if (ret) {
                unit = "dp";
                goto err_dp;
        }

        ret = ipu_smfc_init(ipu, dev, ipu_base +
                        devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
        if (ret) {
                unit = "smfc";
                goto err_smfc;
        }

        return 0;

err_smfc:
        ipu_dp_exit(ipu);
err_dp:
        ipu_dmfc_exit(ipu);
err_dmfc:
        ipu_dc_exit(ipu);
err_dc:
        ipu_di_exit(ipu, 1);
err_di_1:
        ipu_di_exit(ipu, 0);
err_di_0:
        ipu_ic_exit(ipu);
err_ic:
        ipu_csi_exit(ipu, 1);
err_csi_1:
        ipu_csi_exit(ipu, 0);
err_csi_0:
        ipu_cpmem_exit(ipu);
err_cpmem:
        dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
        return ret;
}

static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
{
        unsigned long status;
        int i, bit, irq;

        for (i = 0; i < num_regs; i++) {

                status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
                status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));

                for_each_set_bit(bit, &status, 32) {
                        irq = irq_linear_revmap(ipu->domain,
                                                regs[i] * 32 + bit);
                        if (irq)
                                generic_handle_irq(irq);
                }
        }
}

static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
{
        struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
        const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
        struct irq_chip *chip = irq_get_chip(irq);

        chained_irq_enter(chip, desc);

        ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

        chained_irq_exit(chip, desc);
}

static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
{
        struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
        const int int_reg[] = { 4, 5, 8, 9};
        struct irq_chip *chip = irq_get_chip(irq);

        chained_irq_enter(chip, desc);

        ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

        chained_irq_exit(chip, desc);
}

int ipu_map_irq(struct ipu_soc *ipu, int irq)
{
        int virq;

        virq = irq_linear_revmap(ipu->domain, irq);
        if (!virq)
                virq = irq_create_mapping(ipu->domain, irq);

        return virq;
}
EXPORT_SYMBOL_GPL(ipu_map_irq);

int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
                enum ipu_channel_irq irq_type)
{
        return ipu_map_irq(ipu, irq_type + channel->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);

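/*
 * Illustrative sketch, not part of the original driver: mapping and requesting
 * the end-of-frame interrupt of a channel. The handler and dev_id are supplied
 * by the hypothetical caller; IPU_IRQ_EOF is one of the enum ipu_channel_irq
 * bases from <video/imx-ipu-v3.h>.
 */
static int __maybe_unused ipu_example_request_eof_irq(struct ipu_soc *ipu,
                struct ipuv3_channel *ch, irq_handler_t handler, void *dev_id)
{
        int irq = ipu_idmac_channel_irq(ipu, ch, IPU_IRQ_EOF);

        if (!irq)
                return -ENXIO;  /* ipu_map_irq() returns 0 if no mapping exists */

        return devm_request_irq(ipu->dev, irq, handler, 0, "ipu-example", dev_id);
}
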
static void ipu_submodules_exit(struct ipu_soc *ipu)
{
        ipu_smfc_exit(ipu);
        ipu_dp_exit(ipu);
        ipu_dmfc_exit(ipu);
        ipu_dc_exit(ipu);
        ipu_di_exit(ipu, 1);
        ipu_di_exit(ipu, 0);
        ipu_ic_exit(ipu);
        ipu_csi_exit(ipu, 1);
        ipu_csi_exit(ipu, 0);
        ipu_cpmem_exit(ipu);
}

static int platform_remove_devices_fn(struct device *dev, void *unused)
{
        struct platform_device *pdev = to_platform_device(dev);

        platform_device_unregister(pdev);

        return 0;
}

static void platform_device_unregister_children(struct platform_device *pdev)
{
        device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
}

struct ipu_platform_reg {
        struct ipu_client_platformdata pdata;
        const char *name;
        int reg_offset;
};

static const struct ipu_platform_reg client_reg[] = {
        {
                .pdata = {
                        .di = 0,
                        .dc = 5,
                        .dp = IPU_DP_FLOW_SYNC_BG,
                        .dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
                        .dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
                },
                .name = "imx-ipuv3-crtc",
        }, {
                .pdata = {
                        .di = 1,
                        .dc = 1,
                        .dp = -EINVAL,
                        .dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
                        .dma[1] = -EINVAL,
                },
                .name = "imx-ipuv3-crtc",
        }, {
                .pdata = {
                        .csi = 0,
                        .dma[0] = IPUV3_CHANNEL_CSI0,
                        .dma[1] = -EINVAL,
                },
                .reg_offset = IPU_CM_CSI0_REG_OFS,
                .name = "imx-ipuv3-camera",
        }, {
                .pdata = {
                        .csi = 1,
                        .dma[0] = IPUV3_CHANNEL_CSI1,
                        .dma[1] = -EINVAL,
                },
                .reg_offset = IPU_CM_CSI1_REG_OFS,
                .name = "imx-ipuv3-camera",
        },
};

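/*
 * Illustrative sketch, not part of the original driver: each entry in the
 * table above is registered as a child platform device by
 * ipu_add_client_devices() below, and the matching client driver reads the
 * per-device data back in its probe routine. The probe function here is
 * hypothetical.
 */
static int __maybe_unused ipu_example_client_probe(struct platform_device *pdev)
{
        struct ipu_client_platformdata *pdata = dev_get_platdata(&pdev->dev);

        if (!pdata)
                return -EINVAL;

        /* pdata->di / ->dc / ->dp / ->dma[] select the IPU resources to use. */

        return 0;
}
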
static DEFINE_MUTEX(ipu_client_id_mutex);
static int ipu_client_id;

static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
{
        struct device *dev = ipu->dev;
        unsigned i;
        int id, ret;

        mutex_lock(&ipu_client_id_mutex);
        id = ipu_client_id;
        ipu_client_id += ARRAY_SIZE(client_reg);
        mutex_unlock(&ipu_client_id_mutex);

        for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
                const struct ipu_platform_reg *reg = &client_reg[i];
                struct platform_device *pdev;
                struct resource res;

                if (reg->reg_offset) {
                        memset(&res, 0, sizeof(res));
                        res.flags = IORESOURCE_MEM;
                        res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset;
                        res.end = res.start + PAGE_SIZE - 1;
                        pdev = platform_device_register_resndata(dev, reg->name,
                                id++, &res, 1, &reg->pdata, sizeof(reg->pdata));
                } else {
                        pdev = platform_device_register_data(dev, reg->name,
                                id++, &reg->pdata, sizeof(reg->pdata));
                }

                if (IS_ERR(pdev)) {
                        ret = PTR_ERR(pdev);
                        goto err_register;
                }
        }

        return 0;

err_register:
        platform_device_unregister_children(to_platform_device(dev));

        return ret;
}


static int ipu_irq_init(struct ipu_soc *ipu)
{
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;
        unsigned long unused[IPU_NUM_IRQS / 32] = {
                0x400100d0, 0xffe000fd,
                0x400100d0, 0xffe000fd,
                0x400100d0, 0xffe000fd,
                0x4077ffff, 0xffe7e1fd,
                0x23fffffe, 0x8880fff0,
                0xf98fe7d0, 0xfff81fff,
                0x400100d0, 0xffe000fd,
                0x00000000,
        };
        int ret, i;

        ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
                                            &irq_generic_chip_ops, ipu);
        if (!ipu->domain) {
                dev_err(ipu->dev, "failed to add irq domain\n");
                return -ENODEV;
        }

        ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
                                             handle_level_irq, 0,
                                             IRQF_VALID, 0);
        if (ret < 0) {
                dev_err(ipu->dev, "failed to alloc generic irq chips\n");
                irq_domain_remove(ipu->domain);
                return ret;
        }

        for (i = 0; i < IPU_NUM_IRQS; i += 32) {
                gc = irq_get_domain_generic_chip(ipu->domain, i);
                gc->reg_base = ipu->cm_reg;
                gc->unused = unused[i / 32];
                ct = gc->chip_types;
                ct->chip.irq_ack = irq_gc_ack_set_bit;
                ct->chip.irq_mask = irq_gc_mask_clr_bit;
                ct->chip.irq_unmask = irq_gc_mask_set_bit;
                ct->regs.ack = IPU_INT_STAT(i / 32);
                ct->regs.mask = IPU_INT_CTRL(i / 32);
        }

        irq_set_chained_handler(ipu->irq_sync, ipu_irq_handler);
        irq_set_handler_data(ipu->irq_sync, ipu);
        irq_set_chained_handler(ipu->irq_err, ipu_err_irq_handler);
        irq_set_handler_data(ipu->irq_err, ipu);

        return 0;
}

static void ipu_irq_exit(struct ipu_soc *ipu)
{
        int i, irq;

        irq_set_chained_handler(ipu->irq_err, NULL);
        irq_set_handler_data(ipu->irq_err, NULL);
        irq_set_chained_handler(ipu->irq_sync, NULL);
        irq_set_handler_data(ipu->irq_sync, NULL);

        /* TODO: remove irq_domain_generic_chips */

        for (i = 0; i < IPU_NUM_IRQS; i++) {
                irq = irq_linear_revmap(ipu->domain, i);
                if (irq)
                        irq_dispose_mapping(irq);
        }

        irq_domain_remove(ipu->domain);
}

static int ipu_probe(struct platform_device *pdev)
{
        const struct of_device_id *of_id =
                        of_match_device(imx_ipu_dt_ids, &pdev->dev);
        struct ipu_soc *ipu;
        struct resource *res;
        unsigned long ipu_base;
        int i, ret, irq_sync, irq_err;
        const struct ipu_devtype *devtype;

        devtype = of_id->data;

        irq_sync = platform_get_irq(pdev, 0);
        irq_err = platform_get_irq(pdev, 1);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
                        irq_sync, irq_err);

        if (!res || irq_sync < 0 || irq_err < 0)
                return -ENODEV;

        ipu_base = res->start;

        ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
        if (!ipu)
                return -ENOMEM;

        for (i = 0; i < 64; i++)
                ipu->channel[i].ipu = ipu;
        ipu->devtype = devtype;
        ipu->ipu_type = devtype->type;

        spin_lock_init(&ipu->lock);
        mutex_init(&ipu->channel_lock);

        dev_dbg(&pdev->dev, "cm_reg:   0x%08lx\n",
                        ipu_base + devtype->cm_ofs);
        dev_dbg(&pdev->dev, "idmac:    0x%08lx\n",
                        ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
        dev_dbg(&pdev->dev, "cpmem:    0x%08lx\n",
                        ipu_base + devtype->cpmem_ofs);
        dev_dbg(&pdev->dev, "csi0:    0x%08lx\n",
                        ipu_base + devtype->csi0_ofs);
        dev_dbg(&pdev->dev, "csi1:    0x%08lx\n",
                        ipu_base + devtype->csi1_ofs);
        dev_dbg(&pdev->dev, "ic:      0x%08lx\n",
                        ipu_base + devtype->ic_ofs);
        dev_dbg(&pdev->dev, "disp0:    0x%08lx\n",
                        ipu_base + devtype->disp0_ofs);
        dev_dbg(&pdev->dev, "disp1:    0x%08lx\n",
                        ipu_base + devtype->disp1_ofs);
        dev_dbg(&pdev->dev, "srm:      0x%08lx\n",
                        ipu_base + devtype->srm_ofs);
        dev_dbg(&pdev->dev, "tpm:      0x%08lx\n",
                        ipu_base + devtype->tpm_ofs);
        dev_dbg(&pdev->dev, "dc:       0x%08lx\n",
                        ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
        dev_dbg(&pdev->dev, "ic:       0x%08lx\n",
                        ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
        dev_dbg(&pdev->dev, "dmfc:     0x%08lx\n",
                        ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
        dev_dbg(&pdev->dev, "vdi:      0x%08lx\n",
                        ipu_base + devtype->vdi_ofs);

        ipu->cm_reg = devm_ioremap(&pdev->dev,
                        ipu_base + devtype->cm_ofs, PAGE_SIZE);
        ipu->idmac_reg = devm_ioremap(&pdev->dev,
                        ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
                        PAGE_SIZE);

        if (!ipu->cm_reg || !ipu->idmac_reg)
                return -ENOMEM;

        ipu->clk = devm_clk_get(&pdev->dev, "bus");
        if (IS_ERR(ipu->clk)) {
                ret = PTR_ERR(ipu->clk);
                dev_err(&pdev->dev, "clk_get failed with %d", ret);
                return ret;
        }

        platform_set_drvdata(pdev, ipu);

        ret = clk_prepare_enable(ipu->clk);
        if (ret) {
                dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }

        ipu->dev = &pdev->dev;
        ipu->irq_sync = irq_sync;
        ipu->irq_err = irq_err;

        ret = ipu_irq_init(ipu);
        if (ret)
                goto out_failed_irq;

        ret = device_reset(&pdev->dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to reset: %d\n", ret);
                goto out_failed_reset;
        }
        ret = ipu_memory_reset(ipu);
        if (ret)
                goto out_failed_reset;

        /* Set MCU_T to divide MCU access window into 2 */
        ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
                        IPU_DISP_GEN);

        ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
        if (ret)
                goto failed_submodules_init;

        ret = ipu_add_client_devices(ipu, ipu_base);
        if (ret) {
                dev_err(&pdev->dev, "adding client devices failed with %d\n",
                                ret);
                goto failed_add_clients;
        }

        dev_info(&pdev->dev, "%s probed\n", devtype->name);

        return 0;

failed_add_clients:
        ipu_submodules_exit(ipu);
failed_submodules_init:
out_failed_reset:
        ipu_irq_exit(ipu);
out_failed_irq:
        clk_disable_unprepare(ipu->clk);
        return ret;
}

static int ipu_remove(struct platform_device *pdev)
{
        struct ipu_soc *ipu = platform_get_drvdata(pdev);

        platform_device_unregister_children(pdev);
        ipu_submodules_exit(ipu);
        ipu_irq_exit(ipu);

        clk_disable_unprepare(ipu->clk);

        return 0;
}

static struct platform_driver imx_ipu_driver = {
        .driver = {
                .name = "imx-ipuv3",
                .of_match_table = imx_ipu_dt_ids,
        },
        .probe = ipu_probe,
        .remove = ipu_remove,
};

module_platform_driver(imx_ipu_driver);

MODULE_ALIAS("platform:imx-ipuv3");
MODULE_DESCRIPTION("i.MX IPU v3 driver");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_LICENSE("GPL");