 * Copyright (C) 2003 - 2008 Nokia Corporation
 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
 * Graphics DMA and LCD DMA graphics transformations
 * by Imre Deak <imre.deak@nokia.com>
 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 * Converted DMA library into platform driver
 *	- G, Manjunath Kondaiah <manjugk@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
24 #include <linux/err.h>
26 #include <linux/slab.h>
27 #include <linux/module.h>
28 #include <linux/init.h>
29 #include <linux/device.h>
31 #include <plat/omap_hwmod.h>
32 #include <plat/omap_device.h>
35 #define OMAP2_DMA_STRIDE 0x60
40 static struct omap_dma_dev_attr *d;
42 static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;
44 static u16 reg_map[] = {
47 [IRQSTATUS_L0] = 0x08,
48 [IRQSTATUS_L1] = 0x0c,
49 [IRQSTATUS_L2] = 0x10,
50 [IRQSTATUS_L3] = 0x14,
51 [IRQENABLE_L0] = 0x18,
52 [IRQENABLE_L1] = 0x1c,
53 [IRQENABLE_L2] = 0x20,
54 [IRQENABLE_L3] = 0x24,
56 [OCP_SYSCONFIG] = 0x2c,
62 /* Common register offsets */
77 /* Channel specific register offsets */
84 /* OMAP4 specific registers */
90 static void __iomem *dma_base;
91 static inline void dma_write(u32 val, int reg, int lch)
96 stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
97 offset = reg_map[reg] + (stride * lch);
98 __raw_writel(val, dma_base + offset);
101 static inline u32 dma_read(int reg, int lch)
106 stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
107 offset = reg_map[reg] + (stride * lch);
108 val = __raw_readl(dma_base + offset);
112 static inline void omap2_disable_irq_lch(int lch)
116 val = dma_read(IRQENABLE_L0, lch);
118 dma_write(val, IRQENABLE_L0, lch);
121 static void omap2_clear_dma(int lch)
123 int i = dma_common_ch_start;
125 for (; i <= dma_common_ch_end; i += 1)
126 dma_write(0, i, lch);
129 static void omap2_show_dma_caps(void)
131 u8 revision = dma_read(REVISION, 0) & 0xff;
132 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
133 revision >> 4, revision & 0xf);
137 static u32 configure_dma_errata(void)
141 * Errata applicable for OMAP2430ES1.0 and all omap2420
144 * Erratum ID: Not Available
145 * Inter Frame DMA buffering issue DMA will wrongly
146 * buffer elements if packing and bursting is enabled. This might
147 * result in data gets stalled in FIFO at the end of the block.
148 * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
149 * guarantee no data will stay in the DMA FIFO in case inter frame
153 * Erratum ID: Not Available
154 * DMA may hang when several channels are used in parallel
155 * In the following configuration, DMA channel hanging can occur:
156 * a. Channel i, hardware synchronized, is enabled
157 * b. Another channel (Channel x), software synchronized, is enabled.
158 * c. Channel i is disabled before end of transfer
159 * d. Channel i is reenabled.
160 * e. Steps 1 to 4 are repeated a certain number of times.
161 * f. A third channel (Channel y), software synchronized, is enabled.
162 * Channel x and Channel y may hang immediately after step 'f'.
164 * For any channel used - make sure NextLCH_ID is set to the value j.
166 if (cpu_is_omap2420() || (cpu_is_omap2430() &&
167 (omap_type() == OMAP2430_REV_ES1_0))) {
169 SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
170 SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
174 * Erratum ID: i378: OMAP2+: sDMA Channel is not disabled
175 * after a transaction error.
176 * Workaround: SW should explicitely disable the channel.
178 if (cpu_class_is_omap2())
179 SET_DMA_ERRATA(DMA_ERRATA_i378);
182 * Erratum ID: i541: sDMA FIFO draining does not finish
183 * If sDMA channel is disabled on the fly, sDMA enters standby even
184 * through FIFO Drain is still in progress
185 * Workaround: Put sDMA in NoStandby more before a logical channel is
186 * disabled, then put it back to SmartStandby right after the channel
187 * finishes FIFO draining.
189 if (cpu_is_omap34xx())
190 SET_DMA_ERRATA(DMA_ERRATA_i541);
193 * Erratum ID: i88 : Special programming model needed to disable DMA
194 * before end of block.
195 * Workaround: software must ensure that the DMA is configured in No
196 * Standby mode(DMAx_OCP_SYSCONFIG.MIDLEMODE = "01")
198 if (omap_type() == OMAP3430_REV_ES1_0)
199 SET_DMA_ERRATA(DMA_ERRATA_i88);
202 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
203 * read before the DMA controller finished disabling the channel.
205 SET_DMA_ERRATA(DMA_ERRATA_3_3);
208 * Erratum ID: Not Available
209 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
210 * after secure sram context save and restore.
211 * Work around: Hence we need to manually clear those IRQs to avoid
212 * spurious interrupts. This affects only secure devices.
214 if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
215 SET_DMA_ERRATA(DMA_ROMCODE_BUG);
220 /* One time initializations */
221 static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
223 struct platform_device *pdev;
224 struct omap_system_dma_plat_info *p;
225 struct resource *mem;
226 char *name = "omap_dma_system";
228 dma_stride = OMAP2_DMA_STRIDE;
229 dma_common_ch_start = CSDP;
231 p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
233 pr_err("%s: Unable to allocate pdata for %s:%s\n",
234 __func__, name, oh->name);
238 p->dma_attr = (struct omap_dma_dev_attr *)oh->dev_attr;
239 p->disable_irq_lch = omap2_disable_irq_lch;
240 p->show_dma_caps = omap2_show_dma_caps;
241 p->clear_dma = omap2_clear_dma;
242 p->dma_write = dma_write;
243 p->dma_read = dma_read;
245 p->clear_lch_regs = NULL;
247 p->errata = configure_dma_errata();
249 pdev = omap_device_build(name, 0, oh, p, sizeof(*p), NULL, 0, 0);
252 pr_err("%s: Can't build omap_device for %s:%s.\n",
253 __func__, name, oh->name);
254 return PTR_ERR(pdev);
257 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
259 dev_err(&pdev->dev, "%s: no mem resource\n", __func__);
262 dma_base = ioremap(mem->start, resource_size(mem));
264 dev_err(&pdev->dev, "%s: ioremap fail\n", __func__);
269 d->chan = kzalloc(sizeof(struct omap_dma_lch) *
270 (d->lch_count), GFP_KERNEL);
273 dev_err(&pdev->dev, "%s: kzalloc fail\n", __func__);
277 /* Check the capabilities register for descriptor loading feature */
278 if (dma_read(CAPS_0, 0) & DMA_HAS_DESCRIPTOR_CAPS)
279 dma_common_ch_end = CCDN;
281 dma_common_ch_end = CCFN;
286 static int __init omap2_system_dma_init(void)
288 return omap_hwmod_for_each_by_class("dma",
289 omap2_system_dma_init_dev, NULL);
291 arch_initcall(omap2_system_dma_init);