Merge tag 'v3.4-rc6' into next/cleanup
[firefly-linux-kernel-4.4.55.git] / arch / arm / plat-omap / dma.c
1 /*
2  * linux/arch/arm/plat-omap/dma.c
3  *
4  * Copyright (C) 2003 - 2008 Nokia Corporation
5  * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6  * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
7  * Graphics DMA and LCD DMA graphics tranformations
8  * by Imre Deak <imre.deak@nokia.com>
9  * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
10  * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
11  * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
12  *
13  * Copyright (C) 2009 Texas Instruments
14  * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
15  *
16  * Support functions for the OMAP internal DMA channels.
17  *
18  * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
19  * Converted DMA library into DMA platform driver.
20  *      - G, Manjunath Kondaiah <manjugk@ti.com>
21  *
22  * This program is free software; you can redistribute it and/or modify
23  * it under the terms of the GNU General Public License version 2 as
24  * published by the Free Software Foundation.
25  *
26  */
27
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/sched.h>
31 #include <linux/spinlock.h>
32 #include <linux/errno.h>
33 #include <linux/interrupt.h>
34 #include <linux/irq.h>
35 #include <linux/io.h>
36 #include <linux/slab.h>
37 #include <linux/delay.h>
38
39 #include <mach/hardware.h>
40 #include <plat/dma.h>
41
42 #include <plat/tc.h>
43
44 /*
45  * MAX_LOGICAL_DMA_CH_COUNT: the maximum number of logical DMA
46  * channels that an instance of the SDMA IP block can support.  Used
47  * to size arrays.  (The actual maximum on a particular SoC may be less
48  * than this -- for example, OMAP1 SDMA instances only support 17 logical
49  * DMA channels.)
50  */
51 #define MAX_LOGICAL_DMA_CH_COUNT                32
52
53 #undef DEBUG
54
55 #ifndef CONFIG_ARCH_OMAP1
56 enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
57         DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
58 };
59
60 enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
61 #endif
62
63 #define OMAP_DMA_ACTIVE                 0x01
64 #define OMAP2_DMA_CSR_CLEAR_MASK        0xffffffff
65
66 #define OMAP_FUNC_MUX_ARM_BASE          (0xfffe1000 + 0xec)
67
68 static struct omap_system_dma_plat_info *p;
69 static struct omap_dma_dev_attr *d;
70
71 static int enable_1510_mode;
72 static u32 errata;
73
74 static struct omap_dma_global_context_registers {
75         u32 dma_irqenable_l0;
76         u32 dma_ocp_sysconfig;
77         u32 dma_gcr;
78 } omap_dma_global_context;
79
80 struct dma_link_info {
81         int *linked_dmach_q;
82         int no_of_lchs_linked;
83
84         int q_count;
85         int q_tail;
86         int q_head;
87
88         int chain_state;
89         int chain_mode;
90
91 };
92
93 static struct dma_link_info *dma_linked_lch;
94
95 #ifndef CONFIG_ARCH_OMAP1
96
97 /* Chain handling macros */
98 #define OMAP_DMA_CHAIN_QINIT(chain_id)                                  \
99         do {                                                            \
100                 dma_linked_lch[chain_id].q_head =                       \
101                 dma_linked_lch[chain_id].q_tail =                       \
102                 dma_linked_lch[chain_id].q_count = 0;                   \
103         } while (0)
104 #define OMAP_DMA_CHAIN_QFULL(chain_id)                                  \
105                 (dma_linked_lch[chain_id].no_of_lchs_linked ==          \
106                 dma_linked_lch[chain_id].q_count)
107 #define OMAP_DMA_CHAIN_QLAST(chain_id)                                  \
108         do {                                                            \
109                 ((dma_linked_lch[chain_id].no_of_lchs_linked-1) ==      \
110                 dma_linked_lch[chain_id].q_count)                       \
111         } while (0)
112 #define OMAP_DMA_CHAIN_QEMPTY(chain_id)                                 \
113                 (0 == dma_linked_lch[chain_id].q_count)
114 #define __OMAP_DMA_CHAIN_INCQ(end)                                      \
115         ((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)
116 #define OMAP_DMA_CHAIN_INCQHEAD(chain_id)                               \
117         do {                                                            \
118                 __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
119                 dma_linked_lch[chain_id].q_count--;                     \
120         } while (0)
121
122 #define OMAP_DMA_CHAIN_INCQTAIL(chain_id)                               \
123         do {                                                            \
124                 __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
125                 dma_linked_lch[chain_id].q_count++; \
126         } while (0)
127 #endif
128
129 static int dma_lch_count;
130 static int dma_chan_count;
131 static int omap_dma_reserve_channels;
132
133 static spinlock_t dma_chan_lock;
134 static struct omap_dma_lch *dma_chan;
135
136 static inline void disable_lnk(int lch);
137 static void omap_disable_channel_irq(int lch);
138 static inline void omap_enable_channel_irq(int lch);
139
140 #define REVISIT_24XX()          printk(KERN_ERR "FIXME: no %s on 24xx\n", \
141                                                 __func__);
142
143 #ifdef CONFIG_ARCH_OMAP15XX
144 /* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
145 static int omap_dma_in_1510_mode(void)
146 {
147         return enable_1510_mode;
148 }
149 #else
150 #define omap_dma_in_1510_mode()         0
151 #endif
152
153 #ifdef CONFIG_ARCH_OMAP1
154 static inline int get_gdma_dev(int req)
155 {
156         u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
157         int shift = ((req - 1) % 5) * 6;
158
159         return ((omap_readl(reg) >> shift) & 0x3f) + 1;
160 }
161
162 static inline void set_gdma_dev(int req, int dev)
163 {
164         u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
165         int shift = ((req - 1) % 5) * 6;
166         u32 l;
167
168         l = omap_readl(reg);
169         l &= ~(0x3f << shift);
170         l |= (dev - 1) << shift;
171         omap_writel(l, reg);
172 }
173 #else
174 #define set_gdma_dev(req, dev)  do {} while (0)
175 #define omap_readl(reg)         0
176 #define omap_writel(val, reg)   do {} while (0)
177 #endif
178
179 void omap_set_dma_priority(int lch, int dst_port, int priority)
180 {
181         unsigned long reg;
182         u32 l;
183
184         if (cpu_class_is_omap1()) {
185                 switch (dst_port) {
186                 case OMAP_DMA_PORT_OCP_T1:      /* FFFECC00 */
187                         reg = OMAP_TC_OCPT1_PRIOR;
188                         break;
189                 case OMAP_DMA_PORT_OCP_T2:      /* FFFECCD0 */
190                         reg = OMAP_TC_OCPT2_PRIOR;
191                         break;
192                 case OMAP_DMA_PORT_EMIFF:       /* FFFECC08 */
193                         reg = OMAP_TC_EMIFF_PRIOR;
194                         break;
195                 case OMAP_DMA_PORT_EMIFS:       /* FFFECC04 */
196                         reg = OMAP_TC_EMIFS_PRIOR;
197                         break;
198                 default:
199                         BUG();
200                         return;
201                 }
202                 l = omap_readl(reg);
203                 l &= ~(0xf << 8);
204                 l |= (priority & 0xf) << 8;
205                 omap_writel(l, reg);
206         }
207
208         if (cpu_class_is_omap2()) {
209                 u32 ccr;
210
211                 ccr = p->dma_read(CCR, lch);
212                 if (priority)
213                         ccr |= (1 << 6);
214                 else
215                         ccr &= ~(1 << 6);
216                 p->dma_write(ccr, CCR, lch);
217         }
218 }
219 EXPORT_SYMBOL(omap_set_dma_priority);
220
221 void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
222                                   int frame_count, int sync_mode,
223                                   int dma_trigger, int src_or_dst_synch)
224 {
225         u32 l;
226
227         l = p->dma_read(CSDP, lch);
228         l &= ~0x03;
229         l |= data_type;
230         p->dma_write(l, CSDP, lch);
231
232         if (cpu_class_is_omap1()) {
233                 u16 ccr;
234
235                 ccr = p->dma_read(CCR, lch);
236                 ccr &= ~(1 << 5);
237                 if (sync_mode == OMAP_DMA_SYNC_FRAME)
238                         ccr |= 1 << 5;
239                 p->dma_write(ccr, CCR, lch);
240
241                 ccr = p->dma_read(CCR2, lch);
242                 ccr &= ~(1 << 2);
243                 if (sync_mode == OMAP_DMA_SYNC_BLOCK)
244                         ccr |= 1 << 2;
245                 p->dma_write(ccr, CCR2, lch);
246         }
247
248         if (cpu_class_is_omap2() && dma_trigger) {
249                 u32 val;
250
251                 val = p->dma_read(CCR, lch);
252
253                 /* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
254                 val &= ~((1 << 23) | (3 << 19) | 0x1f);
255                 val |= (dma_trigger & ~0x1f) << 14;
256                 val |= dma_trigger & 0x1f;
257
258                 if (sync_mode & OMAP_DMA_SYNC_FRAME)
259                         val |= 1 << 5;
260                 else
261                         val &= ~(1 << 5);
262
263                 if (sync_mode & OMAP_DMA_SYNC_BLOCK)
264                         val |= 1 << 18;
265                 else
266                         val &= ~(1 << 18);
267
268                 if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
269                         val &= ~(1 << 24);      /* dest synch */
270                         val |= (1 << 23);       /* Prefetch */
271                 } else if (src_or_dst_synch) {
272                         val |= 1 << 24;         /* source synch */
273                 } else {
274                         val &= ~(1 << 24);      /* dest synch */
275                 }
276                 p->dma_write(val, CCR, lch);
277         }
278
279         p->dma_write(elem_count, CEN, lch);
280         p->dma_write(frame_count, CFN, lch);
281 }
282 EXPORT_SYMBOL(omap_set_dma_transfer_params);
283
284 void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
285 {
286         BUG_ON(omap_dma_in_1510_mode());
287
288         if (cpu_class_is_omap1()) {
289                 u16 w;
290
291                 w = p->dma_read(CCR2, lch);
292                 w &= ~0x03;
293
294                 switch (mode) {
295                 case OMAP_DMA_CONSTANT_FILL:
296                         w |= 0x01;
297                         break;
298                 case OMAP_DMA_TRANSPARENT_COPY:
299                         w |= 0x02;
300                         break;
301                 case OMAP_DMA_COLOR_DIS:
302                         break;
303                 default:
304                         BUG();
305                 }
306                 p->dma_write(w, CCR2, lch);
307
308                 w = p->dma_read(LCH_CTRL, lch);
309                 w &= ~0x0f;
310                 /* Default is channel type 2D */
311                 if (mode) {
312                         p->dma_write(color, COLOR, lch);
313                         w |= 1;         /* Channel type G */
314                 }
315                 p->dma_write(w, LCH_CTRL, lch);
316         }
317
318         if (cpu_class_is_omap2()) {
319                 u32 val;
320
321                 val = p->dma_read(CCR, lch);
322                 val &= ~((1 << 17) | (1 << 16));
323
324                 switch (mode) {
325                 case OMAP_DMA_CONSTANT_FILL:
326                         val |= 1 << 16;
327                         break;
328                 case OMAP_DMA_TRANSPARENT_COPY:
329                         val |= 1 << 17;
330                         break;
331                 case OMAP_DMA_COLOR_DIS:
332                         break;
333                 default:
334                         BUG();
335                 }
336                 p->dma_write(val, CCR, lch);
337
338                 color &= 0xffffff;
339                 p->dma_write(color, COLOR, lch);
340         }
341 }
342 EXPORT_SYMBOL(omap_set_dma_color_mode);
343
344 void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
345 {
346         if (cpu_class_is_omap2()) {
347                 u32 csdp;
348
349                 csdp = p->dma_read(CSDP, lch);
350                 csdp &= ~(0x3 << 16);
351                 csdp |= (mode << 16);
352                 p->dma_write(csdp, CSDP, lch);
353         }
354 }
355 EXPORT_SYMBOL(omap_set_dma_write_mode);
356
357 void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
358 {
359         if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
360                 u32 l;
361
362                 l = p->dma_read(LCH_CTRL, lch);
363                 l &= ~0x7;
364                 l |= mode;
365                 p->dma_write(l, LCH_CTRL, lch);
366         }
367 }
368 EXPORT_SYMBOL(omap_set_dma_channel_mode);
369
370 /* Note that src_port is only for omap1 */
371 void omap_set_dma_src_params(int lch, int src_port, int src_amode,
372                              unsigned long src_start,
373                              int src_ei, int src_fi)
374 {
375         u32 l;
376
377         if (cpu_class_is_omap1()) {
378                 u16 w;
379
380                 w = p->dma_read(CSDP, lch);
381                 w &= ~(0x1f << 2);
382                 w |= src_port << 2;
383                 p->dma_write(w, CSDP, lch);
384         }
385
386         l = p->dma_read(CCR, lch);
387         l &= ~(0x03 << 12);
388         l |= src_amode << 12;
389         p->dma_write(l, CCR, lch);
390
391         p->dma_write(src_start, CSSA, lch);
392
393         p->dma_write(src_ei, CSEI, lch);
394         p->dma_write(src_fi, CSFI, lch);
395 }
396 EXPORT_SYMBOL(omap_set_dma_src_params);
397
398 void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
399 {
400         omap_set_dma_transfer_params(lch, params->data_type,
401                                      params->elem_count, params->frame_count,
402                                      params->sync_mode, params->trigger,
403                                      params->src_or_dst_synch);
404         omap_set_dma_src_params(lch, params->src_port,
405                                 params->src_amode, params->src_start,
406                                 params->src_ei, params->src_fi);
407
408         omap_set_dma_dest_params(lch, params->dst_port,
409                                  params->dst_amode, params->dst_start,
410                                  params->dst_ei, params->dst_fi);
411         if (params->read_prio || params->write_prio)
412                 omap_dma_set_prio_lch(lch, params->read_prio,
413                                       params->write_prio);
414 }
415 EXPORT_SYMBOL(omap_set_dma_params);
416
417 void omap_set_dma_src_index(int lch, int eidx, int fidx)
418 {
419         if (cpu_class_is_omap2())
420                 return;
421
422         p->dma_write(eidx, CSEI, lch);
423         p->dma_write(fidx, CSFI, lch);
424 }
425 EXPORT_SYMBOL(omap_set_dma_src_index);
426
427 void omap_set_dma_src_data_pack(int lch, int enable)
428 {
429         u32 l;
430
431         l = p->dma_read(CSDP, lch);
432         l &= ~(1 << 6);
433         if (enable)
434                 l |= (1 << 6);
435         p->dma_write(l, CSDP, lch);
436 }
437 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
438
439 void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
440 {
441         unsigned int burst = 0;
442         u32 l;
443
444         l = p->dma_read(CSDP, lch);
445         l &= ~(0x03 << 7);
446
447         switch (burst_mode) {
448         case OMAP_DMA_DATA_BURST_DIS:
449                 break;
450         case OMAP_DMA_DATA_BURST_4:
451                 if (cpu_class_is_omap2())
452                         burst = 0x1;
453                 else
454                         burst = 0x2;
455                 break;
456         case OMAP_DMA_DATA_BURST_8:
457                 if (cpu_class_is_omap2()) {
458                         burst = 0x2;
459                         break;
460                 }
461                 /*
462                  * not supported by current hardware on OMAP1
463                  * w |= (0x03 << 7);
464                  * fall through
465                  */
466         case OMAP_DMA_DATA_BURST_16:
467                 if (cpu_class_is_omap2()) {
468                         burst = 0x3;
469                         break;
470                 }
471                 /*
472                  * OMAP1 don't support burst 16
473                  * fall through
474                  */
475         default:
476                 BUG();
477         }
478
479         l |= (burst << 7);
480         p->dma_write(l, CSDP, lch);
481 }
482 EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
483
484 /* Note that dest_port is only for OMAP1 */
485 void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
486                               unsigned long dest_start,
487                               int dst_ei, int dst_fi)
488 {
489         u32 l;
490
491         if (cpu_class_is_omap1()) {
492                 l = p->dma_read(CSDP, lch);
493                 l &= ~(0x1f << 9);
494                 l |= dest_port << 9;
495                 p->dma_write(l, CSDP, lch);
496         }
497
498         l = p->dma_read(CCR, lch);
499         l &= ~(0x03 << 14);
500         l |= dest_amode << 14;
501         p->dma_write(l, CCR, lch);
502
503         p->dma_write(dest_start, CDSA, lch);
504
505         p->dma_write(dst_ei, CDEI, lch);
506         p->dma_write(dst_fi, CDFI, lch);
507 }
508 EXPORT_SYMBOL(omap_set_dma_dest_params);
509
510 void omap_set_dma_dest_index(int lch, int eidx, int fidx)
511 {
512         if (cpu_class_is_omap2())
513                 return;
514
515         p->dma_write(eidx, CDEI, lch);
516         p->dma_write(fidx, CDFI, lch);
517 }
518 EXPORT_SYMBOL(omap_set_dma_dest_index);
519
520 void omap_set_dma_dest_data_pack(int lch, int enable)
521 {
522         u32 l;
523
524         l = p->dma_read(CSDP, lch);
525         l &= ~(1 << 13);
526         if (enable)
527                 l |= 1 << 13;
528         p->dma_write(l, CSDP, lch);
529 }
530 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
531
532 void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
533 {
534         unsigned int burst = 0;
535         u32 l;
536
537         l = p->dma_read(CSDP, lch);
538         l &= ~(0x03 << 14);
539
540         switch (burst_mode) {
541         case OMAP_DMA_DATA_BURST_DIS:
542                 break;
543         case OMAP_DMA_DATA_BURST_4:
544                 if (cpu_class_is_omap2())
545                         burst = 0x1;
546                 else
547                         burst = 0x2;
548                 break;
549         case OMAP_DMA_DATA_BURST_8:
550                 if (cpu_class_is_omap2())
551                         burst = 0x2;
552                 else
553                         burst = 0x3;
554                 break;
555         case OMAP_DMA_DATA_BURST_16:
556                 if (cpu_class_is_omap2()) {
557                         burst = 0x3;
558                         break;
559                 }
560                 /*
561                  * OMAP1 don't support burst 16
562                  * fall through
563                  */
564         default:
565                 printk(KERN_ERR "Invalid DMA burst mode\n");
566                 BUG();
567                 return;
568         }
569         l |= (burst << 14);
570         p->dma_write(l, CSDP, lch);
571 }
572 EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
573
574 static inline void omap_enable_channel_irq(int lch)
575 {
576         u32 status;
577
578         /* Clear CSR */
579         if (cpu_class_is_omap1())
580                 status = p->dma_read(CSR, lch);
581         else if (cpu_class_is_omap2())
582                 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
583
584         /* Enable some nice interrupts. */
585         p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
586 }
587
588 static void omap_disable_channel_irq(int lch)
589 {
590         if (cpu_class_is_omap2())
591                 p->dma_write(0, CICR, lch);
592 }
593
594 void omap_enable_dma_irq(int lch, u16 bits)
595 {
596         dma_chan[lch].enabled_irqs |= bits;
597 }
598 EXPORT_SYMBOL(omap_enable_dma_irq);
599
600 void omap_disable_dma_irq(int lch, u16 bits)
601 {
602         dma_chan[lch].enabled_irqs &= ~bits;
603 }
604 EXPORT_SYMBOL(omap_disable_dma_irq);
605
606 static inline void enable_lnk(int lch)
607 {
608         u32 l;
609
610         l = p->dma_read(CLNK_CTRL, lch);
611
612         if (cpu_class_is_omap1())
613                 l &= ~(1 << 14);
614
615         /* Set the ENABLE_LNK bits */
616         if (dma_chan[lch].next_lch != -1)
617                 l = dma_chan[lch].next_lch | (1 << 15);
618
619 #ifndef CONFIG_ARCH_OMAP1
620         if (cpu_class_is_omap2())
621                 if (dma_chan[lch].next_linked_ch != -1)
622                         l = dma_chan[lch].next_linked_ch | (1 << 15);
623 #endif
624
625         p->dma_write(l, CLNK_CTRL, lch);
626 }
627
628 static inline void disable_lnk(int lch)
629 {
630         u32 l;
631
632         l = p->dma_read(CLNK_CTRL, lch);
633
634         /* Disable interrupts */
635         if (cpu_class_is_omap1()) {
636                 p->dma_write(0, CICR, lch);
637                 /* Set the STOP_LNK bit */
638                 l |= 1 << 14;
639         }
640
641         if (cpu_class_is_omap2()) {
642                 omap_disable_channel_irq(lch);
643                 /* Clear the ENABLE_LNK bit */
644                 l &= ~(1 << 15);
645         }
646
647         p->dma_write(l, CLNK_CTRL, lch);
648         dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
649 }
650
651 static inline void omap2_enable_irq_lch(int lch)
652 {
653         u32 val;
654         unsigned long flags;
655
656         if (!cpu_class_is_omap2())
657                 return;
658
659         spin_lock_irqsave(&dma_chan_lock, flags);
660         val = p->dma_read(IRQENABLE_L0, lch);
661         val |= 1 << lch;
662         p->dma_write(val, IRQENABLE_L0, lch);
663         spin_unlock_irqrestore(&dma_chan_lock, flags);
664 }
665
666 static inline void omap2_disable_irq_lch(int lch)
667 {
668         u32 val;
669         unsigned long flags;
670
671         if (!cpu_class_is_omap2())
672                 return;
673
674         spin_lock_irqsave(&dma_chan_lock, flags);
675         val = p->dma_read(IRQENABLE_L0, lch);
676         val &= ~(1 << lch);
677         p->dma_write(val, IRQENABLE_L0, lch);
678         spin_unlock_irqrestore(&dma_chan_lock, flags);
679 }
680
681 int omap_request_dma(int dev_id, const char *dev_name,
682                      void (*callback)(int lch, u16 ch_status, void *data),
683                      void *data, int *dma_ch_out)
684 {
685         int ch, free_ch = -1;
686         unsigned long flags;
687         struct omap_dma_lch *chan;
688
689         spin_lock_irqsave(&dma_chan_lock, flags);
690         for (ch = 0; ch < dma_chan_count; ch++) {
691                 if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
692                         free_ch = ch;
693                         if (dev_id == 0)
694                                 break;
695                 }
696         }
697         if (free_ch == -1) {
698                 spin_unlock_irqrestore(&dma_chan_lock, flags);
699                 return -EBUSY;
700         }
701         chan = dma_chan + free_ch;
702         chan->dev_id = dev_id;
703
704         if (p->clear_lch_regs)
705                 p->clear_lch_regs(free_ch);
706
707         if (cpu_class_is_omap2())
708                 omap_clear_dma(free_ch);
709
710         spin_unlock_irqrestore(&dma_chan_lock, flags);
711
712         chan->dev_name = dev_name;
713         chan->callback = callback;
714         chan->data = data;
715         chan->flags = 0;
716
717 #ifndef CONFIG_ARCH_OMAP1
718         if (cpu_class_is_omap2()) {
719                 chan->chain_id = -1;
720                 chan->next_linked_ch = -1;
721         }
722 #endif
723
724         chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
725
726         if (cpu_class_is_omap1())
727                 chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
728         else if (cpu_class_is_omap2())
729                 chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
730                         OMAP2_DMA_TRANS_ERR_IRQ;
731
732         if (cpu_is_omap16xx()) {
733                 /* If the sync device is set, configure it dynamically. */
734                 if (dev_id != 0) {
735                         set_gdma_dev(free_ch + 1, dev_id);
736                         dev_id = free_ch + 1;
737                 }
738                 /*
739                  * Disable the 1510 compatibility mode and set the sync device
740                  * id.
741                  */
742                 p->dma_write(dev_id | (1 << 10), CCR, free_ch);
743         } else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
744                 p->dma_write(dev_id, CCR, free_ch);
745         }
746
747         if (cpu_class_is_omap2()) {
748                 omap2_enable_irq_lch(free_ch);
749                 omap_enable_channel_irq(free_ch);
750                 /* Clear the CSR register and IRQ status register */
751                 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, free_ch);
752                 p->dma_write(1 << free_ch, IRQSTATUS_L0, 0);
753         }
754
755         *dma_ch_out = free_ch;
756
757         return 0;
758 }
759 EXPORT_SYMBOL(omap_request_dma);
760
761 void omap_free_dma(int lch)
762 {
763         unsigned long flags;
764
765         if (dma_chan[lch].dev_id == -1) {
766                 pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
767                        lch);
768                 return;
769         }
770
771         if (cpu_class_is_omap1()) {
772                 /* Disable all DMA interrupts for the channel. */
773                 p->dma_write(0, CICR, lch);
774                 /* Make sure the DMA transfer is stopped. */
775                 p->dma_write(0, CCR, lch);
776         }
777
778         if (cpu_class_is_omap2()) {
779                 omap2_disable_irq_lch(lch);
780
781                 /* Clear the CSR register and IRQ status register */
782                 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
783                 p->dma_write(1 << lch, IRQSTATUS_L0, lch);
784
785                 /* Disable all DMA interrupts for the channel. */
786                 p->dma_write(0, CICR, lch);
787
788                 /* Make sure the DMA transfer is stopped. */
789                 p->dma_write(0, CCR, lch);
790                 omap_clear_dma(lch);
791         }
792
793         spin_lock_irqsave(&dma_chan_lock, flags);
794         dma_chan[lch].dev_id = -1;
795         dma_chan[lch].next_lch = -1;
796         dma_chan[lch].callback = NULL;
797         spin_unlock_irqrestore(&dma_chan_lock, flags);
798 }
799 EXPORT_SYMBOL(omap_free_dma);
800
801 /**
802  * @brief omap_dma_set_global_params : Set global priority settings for dma
803  *
804  * @param arb_rate
805  * @param max_fifo_depth
806  * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
807  *                                                 DMA_THREAD_RESERVE_ONET
808  *                                                 DMA_THREAD_RESERVE_TWOT
809  *                                                 DMA_THREAD_RESERVE_THREET
810  */
811 void
812 omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
813 {
814         u32 reg;
815
816         if (!cpu_class_is_omap2()) {
817                 printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
818                 return;
819         }
820
821         if (max_fifo_depth == 0)
822                 max_fifo_depth = 1;
823         if (arb_rate == 0)
824                 arb_rate = 1;
825
826         reg = 0xff & max_fifo_depth;
827         reg |= (0x3 & tparams) << 12;
828         reg |= (arb_rate & 0xff) << 16;
829
830         p->dma_write(reg, GCR, 0);
831 }
832 EXPORT_SYMBOL(omap_dma_set_global_params);
833
834 /**
835  * @brief omap_dma_set_prio_lch : Set channel wise priority settings
836  *
837  * @param lch
838  * @param read_prio - Read priority
839  * @param write_prio - Write priority
840  * Both of the above can be set with one of the following values :
841  *      DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
842  */
843 int
844 omap_dma_set_prio_lch(int lch, unsigned char read_prio,
845                       unsigned char write_prio)
846 {
847         u32 l;
848
849         if (unlikely((lch < 0 || lch >= dma_lch_count))) {
850                 printk(KERN_ERR "Invalid channel id\n");
851                 return -EINVAL;
852         }
853         l = p->dma_read(CCR, lch);
854         l &= ~((1 << 6) | (1 << 26));
855         if (cpu_is_omap2430() || cpu_is_omap34xx() ||  cpu_is_omap44xx())
856                 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
857         else
858                 l |= ((read_prio & 0x1) << 6);
859
860         p->dma_write(l, CCR, lch);
861
862         return 0;
863 }
864 EXPORT_SYMBOL(omap_dma_set_prio_lch);
865
866 /*
867  * Clears any DMA state so the DMA engine is ready to restart with new buffers
868  * through omap_start_dma(). Any buffers in flight are discarded.
869  */
870 void omap_clear_dma(int lch)
871 {
872         unsigned long flags;
873
874         local_irq_save(flags);
875         p->clear_dma(lch);
876         local_irq_restore(flags);
877 }
878 EXPORT_SYMBOL(omap_clear_dma);
879
880 void omap_start_dma(int lch)
881 {
882         u32 l;
883
884         /*
885          * The CPC/CDAC register needs to be initialized to zero
886          * before starting dma transfer.
887          */
888         if (cpu_is_omap15xx())
889                 p->dma_write(0, CPC, lch);
890         else
891                 p->dma_write(0, CDAC, lch);
892
893         if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
894                 int next_lch, cur_lch;
895                 char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];
896
897                 dma_chan_link_map[lch] = 1;
898                 /* Set the link register of the first channel */
899                 enable_lnk(lch);
900
901                 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
902                 cur_lch = dma_chan[lch].next_lch;
903                 do {
904                         next_lch = dma_chan[cur_lch].next_lch;
905
906                         /* The loop case: we've been here already */
907                         if (dma_chan_link_map[cur_lch])
908                                 break;
909                         /* Mark the current channel */
910                         dma_chan_link_map[cur_lch] = 1;
911
912                         enable_lnk(cur_lch);
913                         omap_enable_channel_irq(cur_lch);
914
915                         cur_lch = next_lch;
916                 } while (next_lch != -1);
917         } else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
918                 p->dma_write(lch, CLNK_CTRL, lch);
919
920         omap_enable_channel_irq(lch);
921
922         l = p->dma_read(CCR, lch);
923
924         if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
925                         l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
926         l |= OMAP_DMA_CCR_EN;
927
928         /*
929          * As dma_write() uses IO accessors which are weakly ordered, there
930          * is no guarantee that data in coherent DMA memory will be visible
931          * to the DMA device.  Add a memory barrier here to ensure that any
932          * such data is visible prior to enabling DMA.
933          */
934         mb();
935         p->dma_write(l, CCR, lch);
936
937         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
938 }
939 EXPORT_SYMBOL(omap_start_dma);
940
941 void omap_stop_dma(int lch)
942 {
943         u32 l;
944
945         /* Disable all interrupts on the channel */
946         if (cpu_class_is_omap1())
947                 p->dma_write(0, CICR, lch);
948
949         l = p->dma_read(CCR, lch);
950         if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
951                         (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
952                 int i = 0;
953                 u32 sys_cf;
954
955                 /* Configure No-Standby */
956                 l = p->dma_read(OCP_SYSCONFIG, lch);
957                 sys_cf = l;
958                 l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
959                 l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
960                 p->dma_write(l , OCP_SYSCONFIG, 0);
961
962                 l = p->dma_read(CCR, lch);
963                 l &= ~OMAP_DMA_CCR_EN;
964                 p->dma_write(l, CCR, lch);
965
966                 /* Wait for sDMA FIFO drain */
967                 l = p->dma_read(CCR, lch);
968                 while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
969                                         OMAP_DMA_CCR_WR_ACTIVE))) {
970                         udelay(5);
971                         i++;
972                         l = p->dma_read(CCR, lch);
973                 }
974                 if (i >= 100)
975                         printk(KERN_ERR "DMA drain did not complete on "
976                                         "lch %d\n", lch);
977                 /* Restore OCP_SYSCONFIG */
978                 p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
979         } else {
980                 l &= ~OMAP_DMA_CCR_EN;
981                 p->dma_write(l, CCR, lch);
982         }
983
984         /*
985          * Ensure that data transferred by DMA is visible to any access
986          * after DMA has been disabled.  This is important for coherent
987          * DMA regions.
988          */
989         mb();
990
991         if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
992                 int next_lch, cur_lch = lch;
993                 char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];
994
995                 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
996                 do {
997                         /* The loop case: we've been here already */
998                         if (dma_chan_link_map[cur_lch])
999                                 break;
1000                         /* Mark the current channel */
1001                         dma_chan_link_map[cur_lch] = 1;
1002
1003                         disable_lnk(cur_lch);
1004
1005                         next_lch = dma_chan[cur_lch].next_lch;
1006                         cur_lch = next_lch;
1007                 } while (next_lch != -1);
1008         }
1009
1010         dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
1011 }
1012 EXPORT_SYMBOL(omap_stop_dma);
1013
1014 /*
1015  * Allows changing the DMA callback function or data. This may be needed if
1016  * the driver shares a single DMA channel for multiple dma triggers.
1017  */
1018 int omap_set_dma_callback(int lch,
1019                           void (*callback)(int lch, u16 ch_status, void *data),
1020                           void *data)
1021 {
1022         unsigned long flags;
1023
1024         if (lch < 0)
1025                 return -ENODEV;
1026
1027         spin_lock_irqsave(&dma_chan_lock, flags);
1028         if (dma_chan[lch].dev_id == -1) {
1029                 printk(KERN_ERR "DMA callback for not set for free channel\n");
1030                 spin_unlock_irqrestore(&dma_chan_lock, flags);
1031                 return -EINVAL;
1032         }
1033         dma_chan[lch].callback = callback;
1034         dma_chan[lch].data = data;
1035         spin_unlock_irqrestore(&dma_chan_lock, flags);
1036
1037         return 0;
1038 }
1039 EXPORT_SYMBOL(omap_set_dma_callback);
1040
1041 /*
1042  * Returns current physical source address for the given DMA channel.
1043  * If the channel is running the caller must disable interrupts prior calling
1044  * this function and process the returned value before re-enabling interrupt to
1045  * prevent races with the interrupt handler. Note that in continuous mode there
1046  * is a chance for CSSA_L register overflow between the two reads resulting
1047  * in incorrect return value.
1048  */
1049 dma_addr_t omap_get_dma_src_pos(int lch)
1050 {
1051         dma_addr_t offset = 0;
1052
1053         if (cpu_is_omap15xx())
1054                 offset = p->dma_read(CPC, lch);
1055         else
1056                 offset = p->dma_read(CSAC, lch);
1057
1058         if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
1059                 offset = p->dma_read(CSAC, lch);
1060
1061         if (!cpu_is_omap15xx()) {
1062                 /*
1063                  * CDAC == 0 indicates that the DMA transfer on the channel has
1064                  * not been started (no data has been transferred so far).
1065                  * Return the programmed source start address in this case.
1066                  */
1067                 if (likely(p->dma_read(CDAC, lch)))
1068                         offset = p->dma_read(CSAC, lch);
1069                 else
1070                         offset = p->dma_read(CSSA, lch);
1071         }
1072
1073         if (cpu_class_is_omap1())
1074                 offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
1075
1076         return offset;
1077 }
1078 EXPORT_SYMBOL(omap_get_dma_src_pos);
1079
1080 /*
1081  * Returns current physical destination address for the given DMA channel.
1082  * If the channel is running the caller must disable interrupts prior calling
1083  * this function and process the returned value before re-enabling interrupt to
1084  * prevent races with the interrupt handler. Note that in continuous mode there
1085  * is a chance for CDSA_L register overflow between the two reads resulting
1086  * in incorrect return value.
1087  */
1088 dma_addr_t omap_get_dma_dst_pos(int lch)
1089 {
1090         dma_addr_t offset = 0;
1091
1092         if (cpu_is_omap15xx())
1093                 offset = p->dma_read(CPC, lch);
1094         else
1095                 offset = p->dma_read(CDAC, lch);
1096
1097         /*
1098          * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1099          * read before the DMA controller finished disabling the channel.
1100          */
1101         if (!cpu_is_omap15xx() && offset == 0) {
1102                 offset = p->dma_read(CDAC, lch);
1103                 /*
1104                  * CDAC == 0 indicates that the DMA transfer on the channel has
1105                  * not been started (no data has been transferred so far).
1106                  * Return the programmed destination start address in this case.
1107                  */
1108                 if (unlikely(!offset))
1109                         offset = p->dma_read(CDSA, lch);
1110         }
1111
1112         if (cpu_class_is_omap1())
1113                 offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
1114
1115         return offset;
1116 }
1117 EXPORT_SYMBOL(omap_get_dma_dst_pos);
1118
1119 int omap_get_dma_active_status(int lch)
1120 {
1121         return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
1122 }
1123 EXPORT_SYMBOL(omap_get_dma_active_status);
1124
1125 int omap_dma_running(void)
1126 {
1127         int lch;
1128
1129         if (cpu_class_is_omap1())
1130                 if (omap_lcd_dma_running())
1131                         return 1;
1132
1133         for (lch = 0; lch < dma_chan_count; lch++)
1134                 if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
1135                         return 1;
1136
1137         return 0;
1138 }
1139
1140 /*
1141  * lch_queue DMA will start right after lch_head one is finished.
1142  * For this DMA link to start, you still need to start (see omap_start_dma)
1143  * the first one. That will fire up the entire queue.
1144  */
1145 void omap_dma_link_lch(int lch_head, int lch_queue)
1146 {
1147         if (omap_dma_in_1510_mode()) {
1148                 if (lch_head == lch_queue) {
1149                         p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
1150                                                                 CCR, lch_head);
1151                         return;
1152                 }
1153                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1154                 BUG();
1155                 return;
1156         }
1157
1158         if ((dma_chan[lch_head].dev_id == -1) ||
1159             (dma_chan[lch_queue].dev_id == -1)) {
1160                 printk(KERN_ERR "omap_dma: trying to link "
1161                        "non requested channels\n");
1162                 dump_stack();
1163         }
1164
1165         dma_chan[lch_head].next_lch = lch_queue;
1166 }
1167 EXPORT_SYMBOL(omap_dma_link_lch);
1168
1169 /*
1170  * Once the DMA queue is stopped, we can destroy it.
1171  */
1172 void omap_dma_unlink_lch(int lch_head, int lch_queue)
1173 {
1174         if (omap_dma_in_1510_mode()) {
1175                 if (lch_head == lch_queue) {
1176                         p->dma_write(p->dma_read(CCR, lch_head) & ~(3 << 8),
1177                                                                 CCR, lch_head);
1178                         return;
1179                 }
1180                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1181                 BUG();
1182                 return;
1183         }
1184
1185         if (dma_chan[lch_head].next_lch != lch_queue ||
1186             dma_chan[lch_head].next_lch == -1) {
1187                 printk(KERN_ERR "omap_dma: trying to unlink "
1188                        "non linked channels\n");
1189                 dump_stack();
1190         }
1191
1192         if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
1193             (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
1194                 printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
1195                        "before unlinking\n");
1196                 dump_stack();
1197         }
1198
1199         dma_chan[lch_head].next_lch = -1;
1200 }
1201 EXPORT_SYMBOL(omap_dma_unlink_lch);
1202
1203 #ifndef CONFIG_ARCH_OMAP1
1204 /* Create chain of DMA channesls */
1205 static void create_dma_lch_chain(int lch_head, int lch_queue)
1206 {
1207         u32 l;
1208
1209         /* Check if this is the first link in chain */
1210         if (dma_chan[lch_head].next_linked_ch == -1) {
1211                 dma_chan[lch_head].next_linked_ch = lch_queue;
1212                 dma_chan[lch_head].prev_linked_ch = lch_queue;
1213                 dma_chan[lch_queue].next_linked_ch = lch_head;
1214                 dma_chan[lch_queue].prev_linked_ch = lch_head;
1215         }
1216
1217         /* a link exists, link the new channel in circular chain */
1218         else {
1219                 dma_chan[lch_queue].next_linked_ch =
1220                                         dma_chan[lch_head].next_linked_ch;
1221                 dma_chan[lch_queue].prev_linked_ch = lch_head;
1222                 dma_chan[lch_head].next_linked_ch = lch_queue;
1223                 dma_chan[dma_chan[lch_queue].next_linked_ch].prev_linked_ch =
1224                                         lch_queue;
1225         }
1226
1227         l = p->dma_read(CLNK_CTRL, lch_head);
1228         l &= ~(0x1f);
1229         l |= lch_queue;
1230         p->dma_write(l, CLNK_CTRL, lch_head);
1231
1232         l = p->dma_read(CLNK_CTRL, lch_queue);
1233         l &= ~(0x1f);
1234         l |= (dma_chan[lch_queue].next_linked_ch);
1235         p->dma_write(l, CLNK_CTRL, lch_queue);
1236 }
1237
1238 /**
1239  * @brief omap_request_dma_chain : Request a chain of DMA channels
1240  *
1241  * @param dev_id - Device id using the dma channel
1242  * @param dev_name - Device name
1243  * @param callback - Call back function
1244  * @chain_id -
1245  * @no_of_chans - Number of channels requested
1246  * @chain_mode - Dynamic or static chaining : OMAP_DMA_STATIC_CHAIN
1247  *                                            OMAP_DMA_DYNAMIC_CHAIN
1248  * @params - Channel parameters
1249  *
1250  * @return - Success : 0
1251  *           Failure: -EINVAL/-ENOMEM
1252  */
1253 int omap_request_dma_chain(int dev_id, const char *dev_name,
1254                            void (*callback) (int lch, u16 ch_status,
1255                                              void *data),
1256                            int *chain_id, int no_of_chans, int chain_mode,
1257                            struct omap_dma_channel_params params)
1258 {
1259         int *channels;
1260         int i, err;
1261
1262         /* Is the chain mode valid ? */
1263         if (chain_mode != OMAP_DMA_STATIC_CHAIN
1264                         && chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
1265                 printk(KERN_ERR "Invalid chain mode requested\n");
1266                 return -EINVAL;
1267         }
1268
1269         if (unlikely((no_of_chans < 1
1270                         || no_of_chans > dma_lch_count))) {
1271                 printk(KERN_ERR "Invalid Number of channels requested\n");
1272                 return -EINVAL;
1273         }
1274
1275         /*
1276          * Allocate a queue to maintain the status of the channels
1277          * in the chain
1278          */
1279         channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
1280         if (channels == NULL) {
1281                 printk(KERN_ERR "omap_dma: No memory for channel queue\n");
1282                 return -ENOMEM;
1283         }
1284
1285         /* request and reserve DMA channels for the chain */
1286         for (i = 0; i < no_of_chans; i++) {
1287                 err = omap_request_dma(dev_id, dev_name,
1288                                         callback, NULL, &channels[i]);
1289                 if (err < 0) {
1290                         int j;
1291                         for (j = 0; j < i; j++)
1292                                 omap_free_dma(channels[j]);
1293                         kfree(channels);
1294                         printk(KERN_ERR "omap_dma: Request failed %d\n", err);
1295                         return err;
1296                 }
1297                 dma_chan[channels[i]].prev_linked_ch = -1;
1298                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1299
1300                 /*
1301                  * Allowing client drivers to set common parameters now,
1302                  * so that later only relevant (src_start, dest_start
1303                  * and element count) can be set
1304                  */
1305                 omap_set_dma_params(channels[i], &params);
1306         }
1307
1308         *chain_id = channels[0];
1309         dma_linked_lch[*chain_id].linked_dmach_q = channels;
1310         dma_linked_lch[*chain_id].chain_mode = chain_mode;
1311         dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1312         dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;
1313
1314         for (i = 0; i < no_of_chans; i++)
1315                 dma_chan[channels[i]].chain_id = *chain_id;
1316
1317         /* Reset the Queue pointers */
1318         OMAP_DMA_CHAIN_QINIT(*chain_id);
1319
1320         /* Set up the chain */
1321         if (no_of_chans == 1)
1322                 create_dma_lch_chain(channels[0], channels[0]);
1323         else {
1324                 for (i = 0; i < (no_of_chans - 1); i++)
1325                         create_dma_lch_chain(channels[i], channels[i + 1]);
1326         }
1327
1328         return 0;
1329 }
1330 EXPORT_SYMBOL(omap_request_dma_chain);
1331
1332 /**
1333  * @brief omap_modify_dma_chain_param : Modify the chain's params - Modify the
1334  * params after setting it. Dont do this while dma is running!!
1335  *
1336  * @param chain_id - Chained logical channel id.
1337  * @param params
1338  *
1339  * @return - Success : 0
1340  *           Failure : -EINVAL
1341  */
1342 int omap_modify_dma_chain_params(int chain_id,
1343                                 struct omap_dma_channel_params params)
1344 {
1345         int *channels;
1346         u32 i;
1347
1348         /* Check for input params */
1349         if (unlikely((chain_id < 0
1350                         || chain_id >= dma_lch_count))) {
1351                 printk(KERN_ERR "Invalid chain id\n");
1352                 return -EINVAL;
1353         }
1354
1355         /* Check if the chain exists */
1356         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1357                 printk(KERN_ERR "Chain doesn't exists\n");
1358                 return -EINVAL;
1359         }
1360         channels = dma_linked_lch[chain_id].linked_dmach_q;
1361
1362         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1363                 /*
1364                  * Allowing client drivers to set common parameters now,
1365                  * so that later only relevant (src_start, dest_start
1366                  * and element count) can be set
1367                  */
1368                 omap_set_dma_params(channels[i], &params);
1369         }
1370
1371         return 0;
1372 }
1373 EXPORT_SYMBOL(omap_modify_dma_chain_params);
1374
1375 /**
1376  * @brief omap_free_dma_chain - Free all the logical channels in a chain.
1377  *
1378  * @param chain_id
1379  *
1380  * @return - Success : 0
1381  *           Failure : -EINVAL
1382  */
1383 int omap_free_dma_chain(int chain_id)
1384 {
1385         int *channels;
1386         u32 i;
1387
1388         /* Check for input params */
1389         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1390                 printk(KERN_ERR "Invalid chain id\n");
1391                 return -EINVAL;
1392         }
1393
1394         /* Check if the chain exists */
1395         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1396                 printk(KERN_ERR "Chain doesn't exists\n");
1397                 return -EINVAL;
1398         }
1399
1400         channels = dma_linked_lch[chain_id].linked_dmach_q;
1401         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1402                 dma_chan[channels[i]].next_linked_ch = -1;
1403                 dma_chan[channels[i]].prev_linked_ch = -1;
1404                 dma_chan[channels[i]].chain_id = -1;
1405                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1406                 omap_free_dma(channels[i]);
1407         }
1408
1409         kfree(channels);
1410
1411         dma_linked_lch[chain_id].linked_dmach_q = NULL;
1412         dma_linked_lch[chain_id].chain_mode = -1;
1413         dma_linked_lch[chain_id].chain_state = -1;
1414
1415         return (0);
1416 }
1417 EXPORT_SYMBOL(omap_free_dma_chain);
1418
1419 /**
1420  * @brief omap_dma_chain_status - Check if the chain is in
1421  * active / inactive state.
1422  * @param chain_id
1423  *
1424  * @return - Success : OMAP_DMA_CHAIN_ACTIVE/OMAP_DMA_CHAIN_INACTIVE
1425  *           Failure : -EINVAL
1426  */
1427 int omap_dma_chain_status(int chain_id)
1428 {
1429         /* Check for input params */
1430         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1431                 printk(KERN_ERR "Invalid chain id\n");
1432                 return -EINVAL;
1433         }
1434
1435         /* Check if the chain exists */
1436         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1437                 printk(KERN_ERR "Chain doesn't exists\n");
1438                 return -EINVAL;
1439         }
1440         pr_debug("CHAINID=%d, qcnt=%d\n", chain_id,
1441                         dma_linked_lch[chain_id].q_count);
1442
1443         if (OMAP_DMA_CHAIN_QEMPTY(chain_id))
1444                 return OMAP_DMA_CHAIN_INACTIVE;
1445
1446         return OMAP_DMA_CHAIN_ACTIVE;
1447 }
1448 EXPORT_SYMBOL(omap_dma_chain_status);
1449
1450 /**
1451  * @brief omap_dma_chain_a_transfer - Get a free channel from a chain,
1452  * set the params and start the transfer.
1453  *
1454  * @param chain_id
1455  * @param src_start - buffer start address
1456  * @param dest_start - Dest address
1457  * @param elem_count
1458  * @param frame_count
1459  * @param callbk_data - channel callback parameter data.
1460  *
1461  * @return  - Success : 0
1462  *            Failure: -EINVAL/-EBUSY
1463  */
1464 int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1465                         int elem_count, int frame_count, void *callbk_data)
1466 {
1467         int *channels;
1468         u32 l, lch;
1469         int start_dma = 0;
1470
1471         /*
1472          * if buffer size is less than 1 then there is
1473          * no use of starting the chain
1474          */
1475         if (elem_count < 1) {
1476                 printk(KERN_ERR "Invalid buffer size\n");
1477                 return -EINVAL;
1478         }
1479
1480         /* Check for input params */
1481         if (unlikely((chain_id < 0
1482                         || chain_id >= dma_lch_count))) {
1483                 printk(KERN_ERR "Invalid chain id\n");
1484                 return -EINVAL;
1485         }
1486
1487         /* Check if the chain exists */
1488         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1489                 printk(KERN_ERR "Chain doesn't exist\n");
1490                 return -EINVAL;
1491         }
1492
1493         /* Check if all the channels in chain are in use */
1494         if (OMAP_DMA_CHAIN_QFULL(chain_id))
1495                 return -EBUSY;
1496
1497         /* Frame count may be negative in case of indexed transfers */
1498         channels = dma_linked_lch[chain_id].linked_dmach_q;
1499
1500         /* Get a free channel */
1501         lch = channels[dma_linked_lch[chain_id].q_tail];
1502
1503         /* Store the callback data */
1504         dma_chan[lch].data = callbk_data;
1505
1506         /* Increment the q_tail */
1507         OMAP_DMA_CHAIN_INCQTAIL(chain_id);
1508
1509         /* Set the params to the free channel */
1510         if (src_start != 0)
1511                 p->dma_write(src_start, CSSA, lch);
1512         if (dest_start != 0)
1513                 p->dma_write(dest_start, CDSA, lch);
1514
1515         /* Write the buffer size */
1516         p->dma_write(elem_count, CEN, lch);
1517         p->dma_write(frame_count, CFN, lch);
1518
1519         /*
1520          * If the chain is dynamically linked,
1521          * then we may have to start the chain if its not active
1522          */
1523         if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) {
1524
1525                 /*
1526                  * In Dynamic chain, if the chain is not started,
1527                  * queue the channel
1528                  */
1529                 if (dma_linked_lch[chain_id].chain_state ==
1530                                                 DMA_CHAIN_NOTSTARTED) {
1531                         /* Enable the link in previous channel */
1532                         if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1533                                                                 DMA_CH_QUEUED)
1534                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1535                         dma_chan[lch].state = DMA_CH_QUEUED;
1536                 }
1537
1538                 /*
1539                  * Chain is already started, make sure its active,
1540                  * if not then start the chain
1541                  */
1542                 else {
1543                         start_dma = 1;
1544
1545                         if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1546                                                         DMA_CH_STARTED) {
1547                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1548                                 dma_chan[lch].state = DMA_CH_QUEUED;
1549                                 start_dma = 0;
1550                                 if (0 == ((1 << 7) & p->dma_read(
1551                                         CCR, dma_chan[lch].prev_linked_ch))) {
1552                                         disable_lnk(dma_chan[lch].
1553                                                     prev_linked_ch);
1554                                         pr_debug("\n prev ch is stopped\n");
1555                                         start_dma = 1;
1556                                 }
1557                         }
1558
1559                         else if (dma_chan[dma_chan[lch].prev_linked_ch].state
1560                                                         == DMA_CH_QUEUED) {
1561                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1562                                 dma_chan[lch].state = DMA_CH_QUEUED;
1563                                 start_dma = 0;
1564                         }
1565                         omap_enable_channel_irq(lch);
1566
1567                         l = p->dma_read(CCR, lch);
1568
1569                         if ((0 == (l & (1 << 24))))
1570                                 l &= ~(1 << 25);
1571                         else
1572                                 l |= (1 << 25);
1573                         if (start_dma == 1) {
1574                                 if (0 == (l & (1 << 7))) {
1575                                         l |= (1 << 7);
1576                                         dma_chan[lch].state = DMA_CH_STARTED;
1577                                         pr_debug("starting %d\n", lch);
1578                                         p->dma_write(l, CCR, lch);
1579                                 } else
1580                                         start_dma = 0;
1581                         } else {
1582                                 if (0 == (l & (1 << 7)))
1583                                         p->dma_write(l, CCR, lch);
1584                         }
1585                         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1586                 }
1587         }
1588
1589         return 0;
1590 }
1591 EXPORT_SYMBOL(omap_dma_chain_a_transfer);
1592
1593 /**
1594  * @brief omap_start_dma_chain_transfers - Start the chain
1595  *
1596  * @param chain_id
1597  *
1598  * @return - Success : 0
1599  *           Failure : -EINVAL/-EBUSY
1600  */
1601 int omap_start_dma_chain_transfers(int chain_id)
1602 {
1603         int *channels;
1604         u32 l, i;
1605
1606         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1607                 printk(KERN_ERR "Invalid chain id\n");
1608                 return -EINVAL;
1609         }
1610
1611         channels = dma_linked_lch[chain_id].linked_dmach_q;
1612
1613         if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) {
1614                 printk(KERN_ERR "Chain is already started\n");
1615                 return -EBUSY;
1616         }
1617
1618         if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) {
1619                 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked;
1620                                                                         i++) {
1621                         enable_lnk(channels[i]);
1622                         omap_enable_channel_irq(channels[i]);
1623                 }
1624         } else {
1625                 omap_enable_channel_irq(channels[0]);
1626         }
1627
1628         l = p->dma_read(CCR, channels[0]);
1629         l |= (1 << 7);
1630         dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1631         dma_chan[channels[0]].state = DMA_CH_STARTED;
1632
1633         if ((0 == (l & (1 << 24))))
1634                 l &= ~(1 << 25);
1635         else
1636                 l |= (1 << 25);
1637         p->dma_write(l, CCR, channels[0]);
1638
1639         dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1640
1641         return 0;
1642 }
1643 EXPORT_SYMBOL(omap_start_dma_chain_transfers);
1644
1645 /**
1646  * @brief omap_stop_dma_chain_transfers - Stop the dma transfer of a chain.
1647  *
1648  * @param chain_id
1649  *
1650  * @return - Success : 0
1651  *           Failure : EINVAL
1652  */
1653 int omap_stop_dma_chain_transfers(int chain_id)
1654 {
1655         int *channels;
1656         u32 l, i;
1657         u32 sys_cf = 0;
1658
1659         /* Check for input params */
1660         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1661                 printk(KERN_ERR "Invalid chain id\n");
1662                 return -EINVAL;
1663         }
1664
1665         /* Check if the chain exists */
1666         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1667                 printk(KERN_ERR "Chain doesn't exists\n");
1668                 return -EINVAL;
1669         }
1670         channels = dma_linked_lch[chain_id].linked_dmach_q;
1671
1672         if (IS_DMA_ERRATA(DMA_ERRATA_i88)) {
1673                 sys_cf = p->dma_read(OCP_SYSCONFIG, 0);
1674                 l = sys_cf;
1675                 /* Middle mode reg set no Standby */
1676                 l &= ~((1 << 12)|(1 << 13));
1677                 p->dma_write(l, OCP_SYSCONFIG, 0);
1678         }
1679
1680         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1681
1682                 /* Stop the Channel transmission */
1683                 l = p->dma_read(CCR, channels[i]);
1684                 l &= ~(1 << 7);
1685                 p->dma_write(l, CCR, channels[i]);
1686
1687                 /* Disable the link in all the channels */
1688                 disable_lnk(channels[i]);
1689                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1690
1691         }
1692         dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1693
1694         /* Reset the Queue pointers */
1695         OMAP_DMA_CHAIN_QINIT(chain_id);
1696
1697         if (IS_DMA_ERRATA(DMA_ERRATA_i88))
1698                 p->dma_write(sys_cf, OCP_SYSCONFIG, 0);
1699
1700         return 0;
1701 }
1702 EXPORT_SYMBOL(omap_stop_dma_chain_transfers);
1703
1704 /* Get the index of the ongoing DMA in chain */
1705 /**
1706  * @brief omap_get_dma_chain_index - Get the element and frame index
1707  * of the ongoing DMA in chain
1708  *
1709  * @param chain_id
1710  * @param ei - Element index
1711  * @param fi - Frame index
1712  *
1713  * @return - Success : 0
1714  *           Failure : -EINVAL
1715  */
1716 int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1717 {
1718         int lch;
1719         int *channels;
1720
1721         /* Check for input params */
1722         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1723                 printk(KERN_ERR "Invalid chain id\n");
1724                 return -EINVAL;
1725         }
1726
1727         /* Check if the chain exists */
1728         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1729                 printk(KERN_ERR "Chain doesn't exists\n");
1730                 return -EINVAL;
1731         }
1732         if ((!ei) || (!fi))
1733                 return -EINVAL;
1734
1735         channels = dma_linked_lch[chain_id].linked_dmach_q;
1736
1737         /* Get the current channel */
1738         lch = channels[dma_linked_lch[chain_id].q_head];
1739
1740         *ei = p->dma_read(CCEN, lch);
1741         *fi = p->dma_read(CCFN, lch);
1742
1743         return 0;
1744 }
1745 EXPORT_SYMBOL(omap_get_dma_chain_index);
1746
1747 /**
1748  * @brief omap_get_dma_chain_dst_pos - Get the destination position of the
1749  * ongoing DMA in chain
1750  *
1751  * @param chain_id
1752  *
1753  * @return - Success : Destination position
1754  *           Failure : -EINVAL
1755  */
1756 int omap_get_dma_chain_dst_pos(int chain_id)
1757 {
1758         int lch;
1759         int *channels;
1760
1761         /* Check for input params */
1762         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1763                 printk(KERN_ERR "Invalid chain id\n");
1764                 return -EINVAL;
1765         }
1766
1767         /* Check if the chain exists */
1768         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1769                 printk(KERN_ERR "Chain doesn't exists\n");
1770                 return -EINVAL;
1771         }
1772
1773         channels = dma_linked_lch[chain_id].linked_dmach_q;
1774
1775         /* Get the current channel */
1776         lch = channels[dma_linked_lch[chain_id].q_head];
1777
1778         return p->dma_read(CDAC, lch);
1779 }
1780 EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1781
1782 /**
1783  * @brief omap_get_dma_chain_src_pos - Get the source position
1784  * of the ongoing DMA in chain
1785  * @param chain_id
1786  *
1787  * @return - Success : Destination position
1788  *           Failure : -EINVAL
1789  */
1790 int omap_get_dma_chain_src_pos(int chain_id)
1791 {
1792         int lch;
1793         int *channels;
1794
1795         /* Check for input params */
1796         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1797                 printk(KERN_ERR "Invalid chain id\n");
1798                 return -EINVAL;
1799         }
1800
1801         /* Check if the chain exists */
1802         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1803                 printk(KERN_ERR "Chain doesn't exists\n");
1804                 return -EINVAL;
1805         }
1806
1807         channels = dma_linked_lch[chain_id].linked_dmach_q;
1808
1809         /* Get the current channel */
1810         lch = channels[dma_linked_lch[chain_id].q_head];
1811
1812         return p->dma_read(CSAC, lch);
1813 }
1814 EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
1815 #endif  /* ifndef CONFIG_ARCH_OMAP1 */
1816
1817 /*----------------------------------------------------------------------------*/
1818
1819 #ifdef CONFIG_ARCH_OMAP1
1820
1821 static int omap1_dma_handle_ch(int ch)
1822 {
1823         u32 csr;
1824
1825         if (enable_1510_mode && ch >= 6) {
1826                 csr = dma_chan[ch].saved_csr;
1827                 dma_chan[ch].saved_csr = 0;
1828         } else
1829                 csr = p->dma_read(CSR, ch);
1830         if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
1831                 dma_chan[ch + 6].saved_csr = csr >> 7;
1832                 csr &= 0x7f;
1833         }
1834         if ((csr & 0x3f) == 0)
1835                 return 0;
1836         if (unlikely(dma_chan[ch].dev_id == -1)) {
1837                 printk(KERN_WARNING "Spurious interrupt from DMA channel "
1838                        "%d (CSR %04x)\n", ch, csr);
1839                 return 0;
1840         }
1841         if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
1842                 printk(KERN_WARNING "DMA timeout with device %d\n",
1843                        dma_chan[ch].dev_id);
1844         if (unlikely(csr & OMAP_DMA_DROP_IRQ))
1845                 printk(KERN_WARNING "DMA synchronization event drop occurred "
1846                        "with device %d\n", dma_chan[ch].dev_id);
1847         if (likely(csr & OMAP_DMA_BLOCK_IRQ))
1848                 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1849         if (likely(dma_chan[ch].callback != NULL))
1850                 dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
1851
1852         return 1;
1853 }
1854
1855 static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1856 {
1857         int ch = ((int) dev_id) - 1;
1858         int handled = 0;
1859
1860         for (;;) {
1861                 int handled_now = 0;
1862
1863                 handled_now += omap1_dma_handle_ch(ch);
1864                 if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
1865                         handled_now += omap1_dma_handle_ch(ch + 6);
1866                 if (!handled_now)
1867                         break;
1868                 handled += handled_now;
1869         }
1870
1871         return handled ? IRQ_HANDLED : IRQ_NONE;
1872 }
1873
1874 #else
1875 #define omap1_dma_irq_handler   NULL
1876 #endif
1877
1878 #ifdef CONFIG_ARCH_OMAP2PLUS
1879
1880 static int omap2_dma_handle_ch(int ch)
1881 {
1882         u32 status = p->dma_read(CSR, ch);
1883
1884         if (!status) {
1885                 if (printk_ratelimit())
1886                         printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n",
1887                                 ch);
1888                 p->dma_write(1 << ch, IRQSTATUS_L0, ch);
1889                 return 0;
1890         }
1891         if (unlikely(dma_chan[ch].dev_id == -1)) {
1892                 if (printk_ratelimit())
1893                         printk(KERN_WARNING "IRQ %04x for non-allocated DMA"
1894                                         "channel %d\n", status, ch);
1895                 return 0;
1896         }
1897         if (unlikely(status & OMAP_DMA_DROP_IRQ))
1898                 printk(KERN_INFO
1899                        "DMA synchronization event drop occurred with device "
1900                        "%d\n", dma_chan[ch].dev_id);
1901         if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
1902                 printk(KERN_INFO "DMA transaction error with device %d\n",
1903                        dma_chan[ch].dev_id);
1904                 if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
1905                         u32 ccr;
1906
1907                         ccr = p->dma_read(CCR, ch);
1908                         ccr &= ~OMAP_DMA_CCR_EN;
1909                         p->dma_write(ccr, CCR, ch);
1910                         dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1911                 }
1912         }
1913         if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
1914                 printk(KERN_INFO "DMA secure error with device %d\n",
1915                        dma_chan[ch].dev_id);
1916         if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
1917                 printk(KERN_INFO "DMA misaligned error with device %d\n",
1918                        dma_chan[ch].dev_id);
1919
1920         p->dma_write(status, CSR, ch);
1921         p->dma_write(1 << ch, IRQSTATUS_L0, ch);
1922         /* read back the register to flush the write */
1923         p->dma_read(IRQSTATUS_L0, ch);
1924
1925         /* If the ch is not chained then chain_id will be -1 */
1926         if (dma_chan[ch].chain_id != -1) {
1927                 int chain_id = dma_chan[ch].chain_id;
1928                 dma_chan[ch].state = DMA_CH_NOTSTARTED;
1929                 if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
1930                         dma_chan[dma_chan[ch].next_linked_ch].state =
1931                                                         DMA_CH_STARTED;
1932                 if (dma_linked_lch[chain_id].chain_mode ==
1933                                                 OMAP_DMA_DYNAMIC_CHAIN)
1934                         disable_lnk(ch);
1935
1936                 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
1937                         OMAP_DMA_CHAIN_INCQHEAD(chain_id);
1938
1939                 status = p->dma_read(CSR, ch);
1940                 p->dma_write(status, CSR, ch);
1941         }
1942
1943         if (likely(dma_chan[ch].callback != NULL))
1944                 dma_chan[ch].callback(ch, status, dma_chan[ch].data);
1945
1946         return 0;
1947 }
1948
1949 /* STATUS register count is from 1-32 while our is 0-31 */
1950 static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
1951 {
1952         u32 val, enable_reg;
1953         int i;
1954
1955         val = p->dma_read(IRQSTATUS_L0, 0);
1956         if (val == 0) {
1957                 if (printk_ratelimit())
1958                         printk(KERN_WARNING "Spurious DMA IRQ\n");
1959                 return IRQ_HANDLED;
1960         }
1961         enable_reg = p->dma_read(IRQENABLE_L0, 0);
1962         val &= enable_reg; /* Dispatch only relevant interrupts */
1963         for (i = 0; i < dma_lch_count && val != 0; i++) {
1964                 if (val & 1)
1965                         omap2_dma_handle_ch(i);
1966                 val >>= 1;
1967         }
1968
1969         return IRQ_HANDLED;
1970 }
1971
1972 static struct irqaction omap24xx_dma_irq = {
1973         .name = "DMA",
1974         .handler = omap2_dma_irq_handler,
1975         .flags = IRQF_DISABLED
1976 };
1977
1978 #else
1979 static struct irqaction omap24xx_dma_irq;
1980 #endif
1981
1982 /*----------------------------------------------------------------------------*/
1983
1984 void omap_dma_global_context_save(void)
1985 {
1986         omap_dma_global_context.dma_irqenable_l0 =
1987                 p->dma_read(IRQENABLE_L0, 0);
1988         omap_dma_global_context.dma_ocp_sysconfig =
1989                 p->dma_read(OCP_SYSCONFIG, 0);
1990         omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
1991 }
1992
1993 void omap_dma_global_context_restore(void)
1994 {
1995         int ch;
1996
1997         p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
1998         p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
1999                 OCP_SYSCONFIG, 0);
2000         p->dma_write(omap_dma_global_context.dma_irqenable_l0,
2001                 IRQENABLE_L0, 0);
2002
2003         if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
2004                 p->dma_write(0x3 , IRQSTATUS_L0, 0);
2005
2006         for (ch = 0; ch < dma_chan_count; ch++)
2007                 if (dma_chan[ch].dev_id != -1)
2008                         omap_clear_dma(ch);
2009 }
2010
2011 static int __devinit omap_system_dma_probe(struct platform_device *pdev)
2012 {
2013         int ch, ret = 0;
2014         int dma_irq;
2015         char irq_name[4];
2016         int irq_rel;
2017
2018         p = pdev->dev.platform_data;
2019         if (!p) {
2020                 dev_err(&pdev->dev, "%s: System DMA initialized without"
2021                         "platform data\n", __func__);
2022                 return -EINVAL;
2023         }
2024
2025         d                       = p->dma_attr;
2026         errata                  = p->errata;
2027
2028         if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
2029                         && (omap_dma_reserve_channels <= dma_lch_count))
2030                 d->lch_count    = omap_dma_reserve_channels;
2031
2032         dma_lch_count           = d->lch_count;
2033         dma_chan_count          = dma_lch_count;
2034         dma_chan                = d->chan;
2035         enable_1510_mode        = d->dev_caps & ENABLE_1510_MODE;
2036
2037         if (cpu_class_is_omap2()) {
2038                 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
2039                                                 dma_lch_count, GFP_KERNEL);
2040                 if (!dma_linked_lch) {
2041                         ret = -ENOMEM;
2042                         goto exit_dma_lch_fail;
2043                 }
2044         }
2045
2046         spin_lock_init(&dma_chan_lock);
2047         for (ch = 0; ch < dma_chan_count; ch++) {
2048                 omap_clear_dma(ch);
2049                 if (cpu_class_is_omap2())
2050                         omap2_disable_irq_lch(ch);
2051
2052                 dma_chan[ch].dev_id = -1;
2053                 dma_chan[ch].next_lch = -1;
2054
2055                 if (ch >= 6 && enable_1510_mode)
2056                         continue;
2057
2058                 if (cpu_class_is_omap1()) {
2059                         /*
2060                          * request_irq() doesn't like dev_id (ie. ch) being
2061                          * zero, so we have to kludge around this.
2062                          */
2063                         sprintf(&irq_name[0], "%d", ch);
2064                         dma_irq = platform_get_irq_byname(pdev, irq_name);
2065
2066                         if (dma_irq < 0) {
2067                                 ret = dma_irq;
2068                                 goto exit_dma_irq_fail;
2069                         }
2070
2071                         /* INT_DMA_LCD is handled in lcd_dma.c */
2072                         if (dma_irq == INT_DMA_LCD)
2073                                 continue;
2074
2075                         ret = request_irq(dma_irq,
2076                                         omap1_dma_irq_handler, 0, "DMA",
2077                                         (void *) (ch + 1));
2078                         if (ret != 0)
2079                                 goto exit_dma_irq_fail;
2080                 }
2081         }
2082
2083         if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
2084                 omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
2085                                 DMA_DEFAULT_FIFO_DEPTH, 0);
2086
2087         if (cpu_class_is_omap2()) {
2088                 strcpy(irq_name, "0");
2089                 dma_irq = platform_get_irq_byname(pdev, irq_name);
2090                 if (dma_irq < 0) {
2091                         dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
2092                         goto exit_dma_lch_fail;
2093                 }
2094                 ret = setup_irq(dma_irq, &omap24xx_dma_irq);
2095                 if (ret) {
2096                         dev_err(&pdev->dev, "set_up failed for IRQ %d"
2097                                 "for DMA (error %d)\n", dma_irq, ret);
2098                         goto exit_dma_lch_fail;
2099                 }
2100         }
2101
2102         /* reserve dma channels 0 and 1 in high security devices */
2103         if (cpu_is_omap34xx() &&
2104                 (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
2105                 printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
2106                                 "HS ROM code\n");
2107                 dma_chan[0].dev_id = 0;
2108                 dma_chan[1].dev_id = 1;
2109         }
2110         p->show_dma_caps();
2111         return 0;
2112
2113 exit_dma_irq_fail:
2114         dev_err(&pdev->dev, "unable to request IRQ %d"
2115                         "for DMA (error %d)\n", dma_irq, ret);
2116         for (irq_rel = 0; irq_rel < ch; irq_rel++) {
2117                 dma_irq = platform_get_irq(pdev, irq_rel);
2118                 free_irq(dma_irq, (void *)(irq_rel + 1));
2119         }
2120
2121 exit_dma_lch_fail:
2122         kfree(p);
2123         kfree(d);
2124         kfree(dma_chan);
2125         return ret;
2126 }
2127
2128 static int __devexit omap_system_dma_remove(struct platform_device *pdev)
2129 {
2130         int dma_irq;
2131
2132         if (cpu_class_is_omap2()) {
2133                 char irq_name[4];
2134                 strcpy(irq_name, "0");
2135                 dma_irq = platform_get_irq_byname(pdev, irq_name);
2136                 remove_irq(dma_irq, &omap24xx_dma_irq);
2137         } else {
2138                 int irq_rel = 0;
2139                 for ( ; irq_rel < dma_chan_count; irq_rel++) {
2140                         dma_irq = platform_get_irq(pdev, irq_rel);
2141                         free_irq(dma_irq, (void *)(irq_rel + 1));
2142                 }
2143         }
2144         kfree(p);
2145         kfree(d);
2146         kfree(dma_chan);
2147         return 0;
2148 }
2149
2150 static struct platform_driver omap_system_dma_driver = {
2151         .probe          = omap_system_dma_probe,
2152         .remove         = __devexit_p(omap_system_dma_remove),
2153         .driver         = {
2154                 .name   = "omap_dma_system"
2155         },
2156 };
2157
2158 static int __init omap_system_dma_init(void)
2159 {
2160         return platform_driver_register(&omap_system_dma_driver);
2161 }
2162 arch_initcall(omap_system_dma_init);
2163
2164 static void __exit omap_system_dma_exit(void)
2165 {
2166         platform_driver_unregister(&omap_system_dma_driver);
2167 }
2168
2169 MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
2170 MODULE_LICENSE("GPL");
2171 MODULE_ALIAS("platform:" DRIVER_NAME);
2172 MODULE_AUTHOR("Texas Instruments Inc");
2173
2174 /*
2175  * Reserve the omap SDMA channels using cmdline bootarg
2176  * "omap_dma_reserve_ch=". The valid range is 1 to 32
2177  */
2178 static int __init omap_dma_cmdline_reserve_ch(char *str)
2179 {
2180         if (get_option(&str, &omap_dma_reserve_channels) != 1)
2181                 omap_dma_reserve_channels = 0;
2182         return 1;
2183 }
2184
2185 __setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
2186
2187