/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>

#include "dmaengine.h"
#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32
#define PL330_MAX_BURST		16

#define PL330_QUIRK_BROKEN_NO_FLUSHP	BIT(0)
enum pl330_cachectrl {
	CCTRL0,		/* Noncacheable and nonbufferable */
	CCTRL1,		/* Bufferable only */
	CCTRL2,		/* Cacheable, but do not allocate */
	CCTRL3,		/* Cacheable and bufferable, but do not allocate */
	INVALID1,	/* AWCACHE = 0x1000 */
	INVALID2,
	CCTRL6,		/* Cacheable write-through, allocate on writes only */
	CCTRL7,		/* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};
/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf
#define DPC			0x4
#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34
#define FTM			0x38

#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DRCCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR1			0xe04
#define CR2			0xe08
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define PART			0x330
#define DESIGNER		0x41
#define REVISION		0x0
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))
#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
			     | PL330_STATE_WFE | PL330_STATE_FAULTING)
#define CMD_DMAADDH		0x54
#define CMD_DMAEND		0x00
#define CMD_DMAFLUSHP		0x35
#define CMD_DMAGO		0xa0
#define CMD_DMALD		0x04
#define CMD_DMALDP		0x25
#define CMD_DMALP		0x20
#define CMD_DMALPEND		0x28
#define CMD_DMAKILL		0x01
#define CMD_DMAMOV		0xbc
#define CMD_DMANOP		0x18
#define CMD_DMARMB		0x12
#define CMD_DMASEV		0x34
#define CMD_DMAST		0x08
#define CMD_DMASTP		0x29
#define CMD_DMASTZ		0x0c
#define CMD_DMAWFE		0x36
#define CMD_DMAWFP		0x30
#define CMD_DMAWMB		0x13

#define SZ_DMAADDH		3
#define SZ_DMAEND		1
#define SZ_DMAFLUSHP		2
#define SZ_DMALD		1
#define SZ_DMALDP		2
#define SZ_DMALP		2
#define SZ_DMALPEND		2
#define SZ_DMAKILL		1
#define SZ_DMAMOV		6
#define SZ_DMANOP		1
#define SZ_DMARMB		1
#define SZ_DMASEV		2
#define SZ_DMAST		1
#define SZ_DMASTP		2
#define SZ_DMASTZ		1
#define SZ_DMAWFE		2
#define SZ_DMAWFP		2
#define SZ_DMAWMB		1
#define SZ_DMAGO		6

#define BRST_LEN(ccr)		((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)		(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
#define BYTE_MOD_BURST_LEN(b, ccr)	(((b) / BRST_SIZE(ccr)) % BRST_LEN(ccr))
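
/*
 * A worked example of the macros above (hypothetical CCR values, for
 * illustration only): with brst_size = 2 and brst_len = 8 programmed
 * in the CCR, BRST_SIZE(ccr) = 1 << 2 = 4 bytes per beat and
 * BRST_LEN(ccr) = 8 beats, so one burst moves 32 bytes. A 4096-byte
 * buffer then needs BYTE_TO_BURST(4096, ccr) = 4096 / 4 / 8 = 128
 * bursts, with BYTE_MOD_BURST_LEN(4096, ccr) = 0 words left over.
 */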
/*
 * With 256 bytes, we can do more than 2.5 MB and 5 MB xfers per req
 * at 1 byte/burst for P<->M and M<->M respectively.
 * For a typical scenario, at 1 word/burst, 10 MB and 20 MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();
#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif
/* The number of default descriptors */
#define NR_DEFAULT_DESC	16

/* Delay for runtime PM autosuspend, ms */
#define PL330_AUTOSUSPEND_DELAY 20
/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32		periph_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:11;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};
/*
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_cachectrl dcctl;
	enum pl330_cachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};
/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request were successful. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_dst {
	SRC = 0,
	DST,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};
struct dma_pl330_desc;

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	struct dma_pl330_desc *desc;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};

/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};
enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};
struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of submitted descriptors */
	struct list_head submitted_list;
	/* List of issued descriptors */
	struct list_head work_list;
	/* List of completed descriptors */
	struct list_head completed_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/*
	 * Hardware channel thread of PL330 DMAC. NULL if the channel is
	 * available to be acquired.
	 */
	struct pl330_thread *thread;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};
struct pl330_dmac {
	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Holds info about sg limitations */
	struct device_dma_parameters dma_parms;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem *base;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config pcfg;

	spinlock_t lock;
	/* Maximum possible events/irqs */
	int events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t mcode_bus;
	/* CPU address of MicroCode buffer */
	void *mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread *channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread *manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct tasks;
	struct _pl330_tbd dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state state;
	/* Holds list of reqs with due callbacks */
	struct list_head req_done;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
	/* Set peripheral request type according to SoC config */
	enum pl330_cond peripherals_req_type;
	int quirks;
};
static struct pl330_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk = "arm,pl330-broken-no-flushp",
		.id = PL330_QUIRK_BROKEN_NO_FLUSHP,
	}
};
struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;

	enum desc_status status;

	int bytes_requested;
	bool last;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;

	enum dma_transfer_direction rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

struct _xfer_spec {
	u32 ccr;
	struct dma_pl330_desc *desc;
};
static inline bool _queue_empty(struct pl330_thread *thrd)
{
	return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL;
}

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	return thrd->dmac->manager == thrd;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}
static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)
{
	if (dry_run)
		return SZ_DMAADDH;

	buf[0] = CMD_DMAADDH;
	buf[0] |= (da << 1);
	*((__le16 *)&buf[1]) = cpu_to_le16(val);

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);

	return SZ_DMAADDH;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}

static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}
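
/*
 * Encoding sketch, derived from the bit assignments above: the base
 * opcode is CMD_DMALD (0x04); SINGLE sets bits [1:0] to 0b01, giving
 * DMALDS = 0x05; BURST sets them to 0b11, giving DMALDB = 0x07; and
 * ALWAYS leaves the opcode as plain DMALD = 0x04.
 */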
static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}

static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}
struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};

static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}
static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}

static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	*((__le32 *)&buf[2]) = cpu_to_le32(val);

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}

static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMANOP;

	buf[0] = CMD_DMANOP;

	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");

	return SZ_DMANOP;
}

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}

static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}
static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}

static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMASTZ;

	buf[0] = CMD_DMASTZ;

	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");

	return SZ_DMASTZ;
}
static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		unsigned invalidate)
{
	if (dry_run)
		return SZ_DMAWFE;

	buf[0] = CMD_DMAWFE;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	if (invalidate)
		buf[1] |= (1 << 1);

	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
		ev >> 3, invalidate ? ", I" : "");

	return SZ_DMAWFE;
}

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}

static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}

struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};
static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);
	buf[1] = chan & 0x7;
	*((__le32 *)&buf[2]) = cpu_to_le32(addr);

	return SZ_DMAGO;
}

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
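
/*
 * A rough worked example with hypothetical calibration numbers: with
 * HZ = 100 and loops_per_jiffy = 50000, msecs_to_loops(5) evaluates to
 * 50000 / 1000 * 100 * 5 = 25,000 iterations, i.e. the calibrated
 * number of cpu_relax() busy-wait loops spanning about 5 ms.
 */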
/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	return !loops;
}

static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager)
		val |= (1 << 0);
	val |= (thrd->id << 8); /* Channel Number */
	writel(val, regs + DBGINST0);

	val = le32_to_cpu(*((__le32 *)&insn[2]));
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");
		return;
	}

	/* Get going */
	writel(0, regs + DBGCMD);
}
static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}
static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
}
/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	struct _pl330_req *req;
	struct dma_pl330_desc *desc;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (thrd->req[idx].desc != NULL) {
		req = &thrd->req[idx];
	} else {
		idx = thrd->lstenq;
		if (thrd->req[idx].desc != NULL)
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req)
		return true;

	/* Return if req is running */
	if (idx == thrd->req_running)
		return true;

	desc = req->desc;

	ns = desc->rqcfg.nonsecure ? 1 : 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}
static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)
		/* fall through */

	case PL330_STATE_FAULTING:
		_stop(thrd);
		/* fall through */

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)
		/* fall through */

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}
static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}
static inline int _ldst_devtomem(struct pl330_dmac *pl330, unsigned dry_run,
				 u8 buf[], const struct _xfer_spec *pxs,
				 int cyc)
{
	int off = 0;
	enum pl330_cond cond = pl330->peripherals_req_type;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
		off += _emit_LDP(dry_run, &buf[off], cond, pxs->desc->peri);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);

		if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
			off += _emit_FLUSHP(dry_run, &buf[off],
					    pxs->desc->peri);
	}

	return off;
}

static inline int _ldst_memtodev(struct pl330_dmac *pl330,
				 unsigned dry_run, u8 buf[],
				 const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	enum pl330_cond cond = pl330->peripherals_req_type;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_STP(dry_run, &buf[off], cond, pxs->desc->peri);

		if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
			off += _emit_FLUSHP(dry_run, &buf[off],
					    pxs->desc->peri);
	}

	return off;
}
static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	switch (pxs->desc->rqtype) {
	case DMA_MEM_TO_DEV:
		off += _ldst_memtodev(pl330, dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_DEV_TO_MEM:
		off += _ldst_devtomem(pl330, dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_MEM_TO_MEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;
	default:
		off += 0x40000000; /* Scare off the Client */
		break;
	}

	return off;
}
/* Returns bytes consumed and updates bursts */
static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	if (*bursts == 1)
		return _bursts(pl330, dry_run, buf, pxs, 1);

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(pl330, 1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;
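	/*
	 * Worked example with hypothetical microcode sizes: if the DMALP
	 * and DMALPEND instructions cost 2 bytes each (szlp + szlpend = 4)
	 * and one unrolled load/store cycle costs szbrst = 2 bytes, then
	 * cycmax = (255 - 4) / 2 = 125 cycles per loop body.
	 */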
	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(pl330, dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
static inline int _setup_loops(struct pl330_dmac *pl330,
			       unsigned dry_run, u8 buf[],
			       const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(pl330, dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}

	return off;
}
static inline int _setup_xfer(struct pl330_dmac *pl330,
			      unsigned dry_run, u8 buf[],
			      const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(pl330, dry_run, &buf[off], pxs);

	if (pl330->peripherals_req_type == BURST) {
		unsigned int ccr = pxs->ccr;
		unsigned long c = 0;

		c = BYTE_MOD_BURST_LEN(x->bytes, pxs->ccr);

		if (c) {
			ccr &= ~(0xf << CC_SRCBRSTLEN_SHFT);
			ccr &= ~(0xf << CC_DSTBRSTLEN_SHFT);
			off += _emit_MOV(dry_run, &buf[off], CCR, ccr);
			off += _loop(pl330, dry_run, &buf[off], &c, pxs);
		}
	}

	return off;
}
/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
		      struct pl330_thread *thrd, unsigned index,
		      struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	x = &pxs->desc->px;
	if (pl330->peripherals_req_type != BURST) {
		/* Error if xfer length is not aligned at burst size */
		if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
			return -EINVAL;
	}

	off += _setup_xfer(pl330, dry_run, &buf[off], pxs);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}
static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
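
/*
 * A sketch of the CCR produced above for a hypothetical reqcfg with
 * src_inc = dst_inc = 1, brst_size = 2 (4-byte beats), brst_len = 8,
 * nonsecure and no swap (shown only to illustrate the field packing):
 *
 *	ccr = CC_SRCINC | CC_DSTINC
 *	    | CC_SRCNS | CC_DSTNS
 *	    | (7 << CC_SRCBRSTLEN_SHFT) | (7 << CC_DSTBRSTLEN_SHFT)
 *	    | (2 << CC_SRCBRSTSIZE_SHFT) | (2 << CC_DSTBRSTSIZE_SHFT);
 *
 * Note burst length is programmed as (brst_len - 1) and burst size as
 * the power-of-2 exponent, matching BRST_LEN()/BRST_SIZE() above.
 */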
/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(struct pl330_thread *thrd,
			    struct dma_pl330_desc *desc)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct _xfer_spec xs;
	unsigned long flags;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (desc->rqtype != DMA_MEM_TO_MEM &&
	    desc->peri >= pl330->pcfg.num_peri) {
		dev_info(thrd->dmac->ddma.dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, desc->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Prefer Secure Channel */
	if (!_manager_ns(thrd))
		desc->rqcfg.nonsecure = 0;
	else
		desc->rqcfg.nonsecure = 1;

	ccr = _prepare_ccr(&desc->rqcfg);

	idx = thrd->req[0].desc == NULL ? 0 : 1;

	xs.ccr = ccr;
	xs.desc = desc;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(pl330, 1, thrd, idx, &xs);
	if (ret < 0)
		goto xfer_exit;

	if (ret > pl330->mcbufsz / 2) {
		dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n",
				__func__, __LINE__, ret, pl330->mcbufsz / 2);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].desc = desc;
	_setup_req(pl330, 0, thrd, idx, &xs);

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}
static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
{
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (!desc)
		return;

	pch = desc->pchan;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}
static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself has gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pl330->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pl330->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);
			dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err);
			dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err);
			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].desc = NULL;
			thrd->req[1].desc = NULL;
			thrd->req_running = -1;

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(struct pl330_dmac *pl330)
{
	struct dma_pl330_desc *descdone, *tmp;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	regs = pl330->base;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pl330->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pl330->ddma.dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened, i.e., which thread notified */
	val = readl(regs + ES);
	if (pl330->pcfg.num_events < 32
			&& val & ~((1 << pl330->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__,
			__LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pl330->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the req */
			descdone = thrd->req[active].desc;
			thrd->req[active].desc = NULL;

			thrd->req_running = -1;

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&descdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
		list_del(&descdone->rqd);
		spin_unlock_irqrestore(&pl330->lock, flags);
		dma_pl330_rqcb(descdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}
/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	int ev;

	for (ev = 0; ev < pl330->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}
static bool _chan_ns(const struct pl330_dmac *pl330, int i)
{
	return pl330->pcfg.irq_ns & (1 << i);
}
/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd = NULL;
	unsigned long flags;
	int chans, i;

	if (pl330->state == DYING)
		return NULL;

	chans = pl330->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pl330, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].desc = NULL;
				thrd->req[1].desc = NULL;
				thrd->req_running = -1;
				break;
			}
		}
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}
/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pl330->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}
static void pl330_release_channel(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
	dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_dmac *pl330)
{
	void __iomem *regs = pl330->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pl330->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pl330->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pl330->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pl330->pcfg.num_peri = val;
		pl330->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pl330->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pl330->pcfg.mode |= DMAC_MODE_NS;
	else
		pl330->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pl330->pcfg.num_events = val;

	pl330->pcfg.irq_ns = readl(regs + CR3);
}
static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].desc = NULL;

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pl330->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pl330->mcbufsz / 2;
	thrd->req[1].desc = NULL;

	thrd->req_running = -1;
}
static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)
		return -ENOMEM;

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		thrd->id = i;
		thrd->dmac = pl330;
		_reset_thread(thrd);
		thrd->free = true;
	}

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];
	thrd->id = chans;
	thrd->dmac = pl330;
	thrd->free = false;
	pl330->manager = thrd;

	return 0;
}
static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev,
				chans * pl330->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL);
	if (!pl330->mcode_cpu) {
		dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		dev_err(pl330->ddma.dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_coherent(pl330->ddma.dev,
				chans * pl330->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
		return ret;
	}

	return 0;
}
static int pl330_add(struct pl330_dmac *pl330)
{
	int i, ret;

	/* Check if we can handle this DMAC */
	if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
		dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
			pl330->pcfg.periph_id);
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pl330);

	if (pl330->pcfg.num_events == 0) {
		dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pl330->mcbufsz)
		pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pl330->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n");
		return ret;
	}

	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

	return 0;
}
static int dmac_free_threads(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd;
	int i;

	/* Release Channel threads */
	for (i = 0; i < pl330->pcfg.num_chan; i++) {
		thrd = &pl330->channels[i];
		pl330_release_channel(thrd);
	}

	/* Free memory */
	kfree(pl330->channels);

	return 0;
}

static void pl330_del(struct pl330_dmac *pl330)
{
	pl330->state = UNINIT;

	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_threads(pl330);

	dma_free_coherent(pl330->ddma.dev,
		pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
		pl330->mcode_bus);
}
/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			continue;

		ret = pl330_submit_req(pch->thread, desc);
		if (!ret) {
			desc->status = BUSY;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	bool power_down = false;

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			if (!pch->cyclic)
				dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &pch->completed_list);
		}

	/* Try to submit a req immediately next to the last completed cookie */
	fill_queue(pch);

	if (list_empty(&pch->work_list)) {
		spin_lock(&pch->thread->dmac->lock);
		_stop(pch->thread);
		spin_unlock(&pch->thread->dmac->lock);
		power_down = true;
	} else {
		/* Make sure the PL330 Channel thread is active */
		spin_lock(&pch->thread->dmac->lock);
		_start(pch->thread);
		spin_unlock(&pch->thread->dmac->lock);
	}

	while (!list_empty(&pch->completed_list)) {
		dma_async_tx_callback callback;
		void *callback_param;

		desc = list_first_entry(&pch->completed_list,
					struct dma_pl330_desc, node);

		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;

		if (pch->cyclic) {
			desc->status = PREP;
			list_move_tail(&desc->node, &pch->work_list);
			if (power_down) {
				spin_lock(&pch->thread->dmac->lock);
				_start(pch->thread);
				spin_unlock(&pch->thread->dmac->lock);
				power_down = false;
			}
		} else {
			desc->status = FREE;
			list_move_tail(&desc->node, &pch->dmac->desc_pool);
		}

		dma_descriptor_unmap(&desc->txd);

		if (callback) {
			spin_unlock_irqrestore(&pch->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&pch->lock, flags);
		}
	}
	spin_unlock_irqrestore(&pch->lock, flags);

	/* If work list empty, power down */
	if (power_down) {
		pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
		pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
	}
}
bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

	peri_id = chan->private;
	return *peri_id == (unsigned long)param;
}
EXPORT_SYMBOL(pl330_filter);
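
/*
 * A minimal usage sketch of pl330_filter() from a client driver (the
 * peripheral request id 5 is hypothetical); the filter matches when the
 * channel's private peri_id equals the passed parameter:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter, (void *)5);
 */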
static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct pl330_dmac *pl330 = ofdma->of_dma_data;
	unsigned int chan_id;

	if (!pl330)
		return NULL;

	if (count != 1)
		return NULL;

	chan_id = dma_spec->args[0];
	if (chan_id >= pl330->num_peripherals)
		return NULL;

	return dma_get_slave_channel(&pl330->peripherals[chan_id].chan);
}
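
/*
 * Device tree usage sketch for the xlate above (node names and the
 * request number 12 are hypothetical): a client passes a single cell,
 * the peripheral request interface number, which becomes chan_id:
 *
 *	client@0 {
 *		dmas = <&pdma0 12>;
 *		dma-names = "tx";
 *	};
 */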
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->thread = pl330_request_channel(pl330);
	if (!pch->thread) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return -ENOMEM;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}
static int pl330_config(struct dma_chan *chan,
			struct dma_slave_config *slave_config)
{
	struct dma_pl330_chan *pch = to_pchan(chan);

	if (slave_config->direction == DMA_MEM_TO_DEV) {
		if (slave_config->dst_addr)
			pch->fifo_addr = slave_config->dst_addr;
		if (slave_config->dst_addr_width)
			pch->burst_sz = __ffs(slave_config->dst_addr_width);
		if (slave_config->dst_maxburst)
			pch->burst_len = slave_config->dst_maxburst;
	} else if (slave_config->direction == DMA_DEV_TO_MEM) {
		if (slave_config->src_addr)
			pch->fifo_addr = slave_config->src_addr;
		if (slave_config->src_addr_width)
			pch->burst_sz = __ffs(slave_config->src_addr_width);
		if (slave_config->src_maxburst)
			pch->burst_len = slave_config->src_maxburst;
	}

	return 0;
}
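
/*
 * A minimal sketch of how a client programs the fields consumed by
 * pl330_config() above (the FIFO address and widths are hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * With these values, burst_sz becomes __ffs(4) = 2 and burst_len 8.
 */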
static int pl330_terminate_all(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;
	struct pl330_dmac *pl330 = pch->dmac;

	pm_runtime_get_sync(pl330->ddma.dev);
	spin_lock_irqsave(&pch->lock, flags);
	spin_lock(&pl330->lock);
	_stop(pch->thread);
	spin_unlock(&pl330->lock);

	pch->thread->req[0].desc = NULL;
	pch->thread->req[1].desc = NULL;
	pch->thread->req_running = -1;

	/* Mark all desc done */
	list_for_each_entry(desc, &pch->submitted_list, node) {
		desc->status = FREE;
		dma_cookie_complete(&desc->txd);
	}

	list_for_each_entry(desc, &pch->work_list , node) {
		desc->status = FREE;
		dma_cookie_complete(&desc->txd);
	}

	list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
	list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
	list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
	spin_unlock_irqrestore(&pch->lock, flags);
	pm_runtime_mark_last_busy(pl330->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);

	return 0;
}
/*
 * We don't support DMA_RESUME command because of hardware
 * limitations, so after pausing the channel we cannot restore
 * it to active state. We have to terminate channel and setup
 * DMA transfer again. This pause feature was implemented to
 * allow safely reading residue before channel termination.
 */
static int pl330_pause(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	pm_runtime_get_sync(pl330->ddma.dev);
	spin_lock_irqsave(&pch->lock, flags);

	spin_lock(&pl330->lock);
	_stop(pch->thread);
	spin_unlock(&pl330->lock);

	spin_unlock_irqrestore(&pch->lock, flags);
	pm_runtime_mark_last_busy(pl330->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);

	return 0;
}
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	tasklet_kill(&pch->task);

	pm_runtime_get_sync(pch->dmac->ddma.dev);
	spin_lock_irqsave(&pch->lock, flags);

	pl330_release_channel(pch->thread);
	pch->thread = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
}
static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
					   struct dma_pl330_desc *desc)
{
	struct pl330_thread *thrd = pch->thread;
	struct pl330_dmac *pl330 = pch->dmac;
	void __iomem *regs = thrd->dmac->base;
	u32 val, addr;

	pm_runtime_get_sync(pl330->ddma.dev);
	val = addr = 0;
	if (desc->rqcfg.src_inc) {
		val = readl(regs + SA(thrd->id));
		addr = desc->px.src_addr;
	} else {
		val = readl(regs + DA(thrd->id));
		addr = desc->px.dst_addr;
	}
	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);
	return val - addr;
}
static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		 struct dma_tx_state *txstate)
{
	enum dma_status ret;
	unsigned long flags;
	struct dma_pl330_desc *desc, *running = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned int transferred, residual = 0;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (!txstate)
		return ret;

	if (ret == DMA_COMPLETE)
		goto out;

	spin_lock_irqsave(&pch->lock, flags);

	if (pch->thread->req_running != -1)
		running = pch->thread->req[pch->thread->req_running].desc;

	/* Check in pending list */
	list_for_each_entry(desc, &pch->work_list, node) {
		if (desc->status == DONE)
			transferred = desc->bytes_requested;
		else if (running && desc == running)
			transferred =
				pl330_get_current_xferred_count(pch, desc);
		else
			transferred = 0;
		residual += desc->bytes_requested - transferred;
		if (desc->txd.cookie == cookie) {
			switch (desc->status) {
			case DONE:
				ret = DMA_COMPLETE;
				break;
			case PREP:
			case BUSY:
				ret = DMA_IN_PROGRESS;
				break;
			default:
				WARN_ON(1);
			}
			break;
		}
		if (desc->last)
			residual = 0;
	}
	spin_unlock_irqrestore(&pch->lock, flags);

out:
	dma_set_residue(txstate, residual);

	return ret;
}
static void pl330_issue_pending(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);
	if (list_empty(&pch->work_list)) {
		/*
		 * Warn on nothing pending. Empty submitted_list may
		 * break our pm_runtime usage counter as it is
		 * updated on work_list emptiness status.
		 */
		WARN_ON(list_empty(&pch->submitted_list));
		pm_runtime_get_sync(pch->dmac->ddma.dev);
	}
	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long)pch);
}
/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
		if (pch->cyclic) {
			desc->txd.callback = last->txd.callback;
			desc->txd.callback_param = last->txd.callback_param;
		}
		desc->last = false;

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->submitted_list);
	}

	last->last = true;
	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->submitted_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}
static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.scctl = CCTRL0;
	desc->rqcfg.dcctl = CCTRL0;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}
/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	desc = kcalloc(count, sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pl330->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pl330->desc_pool);
	}

	spin_unlock_irqrestore(&pl330->pool_lock, flags);

	return count;
}
static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pl330->pool_lock, flags);

	if (!list_empty(&pl330->desc_pool)) {
		desc = list_entry(pl330->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pl330->pool_lock, flags);

	return desc;
}
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct pl330_dmac *pl330 = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pl330);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pl330, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pl330);
		if (!desc) {
			dev_err(pch->dmac->ddma.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->peri = peri_id ? pch->chan.chan_id : 0;
	desc->rqcfg.pcfg = &pch->dmac->pcfg;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}
static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}
static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicating this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}
/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_dmac *pl330 = pch->dmac;
	int burst_len;

	burst_len = pl330->pcfg.data_bus_width / 8;
	burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
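
/*
 * Worked example for get_burst_len() with a hypothetical configuration:
 * on a 64-bit bus (data_bus_width = 64) with data_buf_dep = 16,
 * num_chan = 8 and brst_size = 0, the starting burst_len is
 * (64/8) * (16/8) = 16, which is already at the cap of 16; it is then
 * decremented until it evenly divides the transfer length at the
 * chosen burst size.
 */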
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct dma_pl330_desc *desc = NULL, *first = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned int i;
	dma_addr_t dst;
	dma_addr_t src;

	if (len % period_len != 0)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n",
		__func__, __LINE__);
		return NULL;
	}

	for (i = 0; i < len / period_len; i++) {
		desc = pl330_get_desc(pch);
		if (!desc) {
			dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
				__func__, __LINE__);

			if (!first)
				return NULL;

			spin_lock_irqsave(&pl330->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pl330->desc_pool);
			}

			list_move_tail(&first->node, &pl330->desc_pool);

			spin_unlock_irqrestore(&pl330->pool_lock, flags);

			return NULL;
		}

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			src = dma_addr;
			dst = pch->fifo_addr;
			break;
		case DMA_DEV_TO_MEM:
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			src = pch->fifo_addr;
			dst = dma_addr;
			break;
		default:
			break;
		}

		desc->rqtype = direction;
		desc->rqcfg.brst_size = pch->burst_sz;

		if (pl330->peripherals_req_type == BURST)
			desc->rqcfg.brst_len = pch->burst_len;
		else
			desc->rqcfg.brst_len = 1;

		desc->bytes_requested = period_len;
		fill_px(&desc->px, dst, src, period_len);

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		dma_addr += period_len;
	}

	if (!desc)
		return NULL;

	pch->cyclic = true;
	desc->txd.flags = flags;

	return &desc->txd;
}
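
/*
 * Typical client usage of the cyclic prep above (buffer sizes are
 * hypothetical): an audio driver splits a 32 KiB ring into four 8 KiB
 * periods and gets a callback per completed period:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, SZ_32K, SZ_8K,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = period_done;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */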
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pl330 = pch->dmac;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->rqtype = DMA_MEM_TO_MEM;

	/* Select max possible burst size */
	burst = pl330->pcfg.data_bus_width / 8;

	/*
	 * Make sure we use a burst size that aligns with all the memcpy
	 * parameters because our DMA programming algorithm doesn't cope with
	 * transfers which straddle an entry in the DMA device's MFIFO.
	 */
	while ((src | dst | len) & (burst - 1))
		burst /= 2;

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	/*
	 * If burst size is smaller than bus width then make sure we only
	 * transfer one at a time to avoid a burst straddling an MFIFO entry.
	 */
	if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
		desc->rqcfg.brst_len = 1;
	else
		desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->bytes_requested = len;

	desc->txd.flags = flags;

	return &desc->txd;
}
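
/*
 * Worked example of the burst-size selection above (hypothetical
 * addresses): for src = 0x1004, dst = 0x2008, len = 0x100 on a 64-bit
 * bus, (src | dst | len) & 7 is non-zero, so burst halves from 8 to 4,
 * which aligns with all three; brst_size then becomes 2 (1 << 2 == 4).
 */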
static void __pl330_giveback_desc(struct pl330_dmac *pl330,
				  struct dma_pl330_desc *first)
{
	unsigned long flags;
	struct dma_pl330_desc *desc;

	if (!first)
		return;

	spin_lock_irqsave(&pl330->pool_lock, flags);

	while (!list_empty(&first->node)) {
		desc = list_entry(first->node.next,
				struct dma_pl330_desc, node);
		list_move_tail(&desc->node, &pl330->desc_pool);
	}

	list_move_tail(&first->node, &pl330->desc_pool);

	spin_unlock_irqrestore(&pl330->pool_lock, flags);
}
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg, void *context)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	struct scatterlist *sg;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct pl330_dmac *pl330 = pch->dmac;

			dev_err(pch->dmac->ddma.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			__pl330_giveback_desc(pl330, first);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;

		if (pl330->peripherals_req_type == BURST)
			desc->rqcfg.brst_len = pch->burst_len;
		else
			desc->rqcfg.brst_len = 1;

		desc->rqtype = direction;
		desc->bytes_requested = sg_dma_len(sg);
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

#define PL330_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
/*
 * Runtime PM callbacks are provided by amba/bus.c driver.
 *
 * It is assumed here that IRQ safe runtime PM is chosen in probe and amba
 * bus driver will only disable/enable the clock in runtime PM callbacks.
 */
static int __maybe_unused pl330_suspend(struct device *dev)
{
	struct amba_device *pcdev = to_amba_device(dev);

	pm_runtime_disable(dev);

	if (!pm_runtime_status_suspended(dev)) {
		/* amba did not disable the clock */
		amba_pclk_disable(pcdev);
	}
	amba_pclk_unprepare(pcdev);

	return 0;
}

static int __maybe_unused pl330_resume(struct device *dev)
{
	struct amba_device *pcdev = to_amba_device(dev);
	int ret;

	ret = amba_pclk_prepare(pcdev);
	if (ret)
		return ret;

	if (!pm_runtime_status_suspended(dev))
		ret = amba_pclk_enable(pcdev);

	pm_runtime_enable(dev);

	return ret;
}

static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct pl330_config *pcfg;
	struct pl330_dmac *pl330;
	struct dma_pl330_chan *pch, *_p;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;
	struct device_node *np = adev->dev.of_node;

	pdat = dev_get_platdata(&adev->dev);

	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Allocate a new DMAC and its Channels */
	pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
	if (!pl330) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pd = &pl330->ddma;
	pd->dev = &adev->dev;

	pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	if (of_find_property(np, "peripherals-req-type-burst", NULL))
		pl330->peripherals_req_type = BURST;
	else
		pl330->peripherals_req_type = SINGLE;

	/* get quirk */
	for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
		if (of_property_read_bool(np, of_quirks[i].quirk))
			pl330->quirks |= of_quirks[i].id;

	res = &adev->res;
	pl330->base = devm_ioremap_resource(&adev->dev, res);
	if (IS_ERR(pl330->base))
		return PTR_ERR(pl330->base);

	amba_set_drvdata(adev, pl330);

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq) {
			ret = devm_request_irq(&adev->dev, irq,
					       pl330_irq_handler, 0,
					       dev_name(&adev->dev), pl330);
			if (ret)
				return ret;
		} else {
			break;
		}
	}

	pcfg = &pl330->pcfg;

	pcfg->periph_id = adev->periphid;
	ret = pl330_add(pl330);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&pl330->desc_pool);
	spin_lock_init(&pl330->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	if (pdat)
		num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan);
	else
		num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);

	pl330->num_peripherals = num_chan;

	pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pl330->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pl330->peripherals\n");
		goto probe_err2;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pl330->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->submitted_list);
		INIT_LIST_HEAD(&pch->work_list);
		INIT_LIST_HEAD(&pch->completed_list);
		spin_lock_init(&pch->lock);
		pch->thread = NULL;
		pch->chan.device = pd;
		pch->dmac = pl330;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pcfg->num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
			dma_cap_set(DMA_PRIVATE, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_config = pl330_config;
	pd->device_pause = pl330_pause;
	pd->device_terminate_all = pl330_terminate_all;
	pd->device_issue_pending = pl330_issue_pending;
	pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
	pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
	pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
			 1 : PL330_MAX_BURST);

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err3;
	}

	if (adev->dev.of_node) {
		ret = of_dma_controller_register(adev->dev.of_node,
					 of_dma_pl330_xlate, pl330);
		if (ret) {
			dev_err(&adev->dev,
			"unable to register DMA to the generic DT DMA helpers\n");
		}
	}

	adev->dev.dma_parms = &pl330->dma_parms;

	/*
	 * This is the limit for transfers with a buswidth of 1, larger
	 * buswidths will have larger limits.
	 */
	ret = dma_set_max_seg_size(&adev->dev, 1900800);
	if (ret)
		dev_err(&adev->dev, "unable to set the seg size\n");

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%x\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
		pcfg->num_peri, pcfg->num_events);

	pm_runtime_irq_safe(&adev->dev);
	pm_runtime_use_autosuspend(&adev->dev);
	pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&adev->dev);
	pm_runtime_put_autosuspend(&adev->dev);

	return 0;
probe_err3:
	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		if (pch->thread) {
			pl330_terminate_all(&pch->chan);
			pl330_free_chan_resources(&pch->chan);
		}
	}
probe_err2:
	pl330_del(pl330);

	return ret;
}
static int pl330_remove(struct amba_device *adev)
{
	struct pl330_dmac *pl330 = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;

	pm_runtime_get_noresume(pl330->ddma.dev);

	if (adev->dev.of_node)
		of_dma_controller_free(adev->dev.of_node);

	dma_async_device_unregister(&pl330->ddma);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		if (pch->thread) {
			pl330_terminate_all(&pch->chan);
			pl330_free_chan_resources(&pch->chan);
		}
	}

	pl330_del(pl330);

	return 0;
}
static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

module_amba_driver(pl330_driver);

MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");