/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>

#include "dmaengine.h"
#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32
enum pl330_srccachectrl {
	SCCTRL0,	/* Noncacheable and nonbufferable */
	SCCTRL1,	/* Bufferable only */
	SCCTRL2,	/* Cacheable, but do not allocate */
	SCCTRL3,	/* Cacheable and bufferable, but do not allocate */
	SINVALID1,
	SINVALID2,
	SCCTRL6,	/* Cacheable write-through, allocate on reads only */
	SCCTRL7,	/* Cacheable write-back, allocate on reads only */
};

enum pl330_dstcachectrl {
	DCCTRL0,	/* Noncacheable and nonbufferable */
	DCCTRL1,	/* Bufferable only */
	DCCTRL2,	/* Cacheable, but do not allocate */
	DCCTRL3,	/* Cacheable and bufferable, but do not allocate */
	DINVALID1,	/* AWCACHE = 0x1000 */
	DINVALID2,
	DCCTRL6,	/* Cacheable write-through, allocate on writes only */
	DCCTRL7,	/* Cacheable write-back, allocate on writes only */
};
/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34
#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)
#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DSTCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28
#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2
#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff
#define PART			0x330
#define DESIGNER		0x41
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))
#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)
#define CMD_DMAADDH	0x54
#define CMD_DMAEND	0x00
#define CMD_DMAFLUSHP	0x35
#define CMD_DMAGO	0xa0
#define CMD_DMALD	0x04
#define CMD_DMALDP	0x25
#define CMD_DMALP	0x20
#define CMD_DMALPEND	0x28
#define CMD_DMAKILL	0x01
#define CMD_DMAMOV	0xbc
#define CMD_DMANOP	0x18
#define CMD_DMARMB	0x12
#define CMD_DMASEV	0x34
#define CMD_DMAST	0x08
#define CMD_DMASTP	0x29
#define CMD_DMASTZ	0x0c
#define CMD_DMAWFE	0x36
#define CMD_DMAWFP	0x30
#define CMD_DMAWMB	0x13
#define SZ_DMAADDH	3
#define SZ_DMAEND	1
#define SZ_DMAFLUSHP	2
#define SZ_DMALD	1
#define SZ_DMALDP	2
#define SZ_DMALP	2
#define SZ_DMALPEND	2
#define SZ_DMAKILL	1
#define SZ_DMAMOV	6
#define SZ_DMANOP	1
#define SZ_DMARMB	1
#define SZ_DMASEV	2
#define SZ_DMAST	1
#define SZ_DMASTP	2
#define SZ_DMASTZ	1
#define SZ_DMAWFE	2
#define SZ_DMAWFP	2
#define SZ_DMAWMB	1
#define SZ_DMAGO	6
#define BRST_LEN(ccr)	((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)	(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
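
/*
 * Worked example (illustrative values only): a CCR encoding a 4-byte
 * source burst size (brst_size field = 2, so BRST_SIZE(ccr) = 1 << 2 = 4)
 * and a 16-beat burst length (field = 15, so BRST_LEN(ccr) = 16) moves
 * 64 bytes per burst; BYTE_TO_BURST(4096, ccr) = 4096 / 4 / 16 = 64.
 */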
/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For a typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256
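
/*
 * Rough sketch of why 256 bytes suffices (illustrative, not from the
 * TRM): a two-level DMALP/DMALPEND nest iterates up to 256 * 256 =
 * 65536 times in only ~8 bytes of microcode, and the rest of the
 * buffer unrolls several load/store pairs per iteration; see _loop()
 * below for the exact encoding.
 */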
/* If the _pl330_req is available to the client */
#define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();
#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif
/* The number of default descriptors */
#define NR_DEFAULT_DESC	16
/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:10;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};
/* Handle to the DMAC provided to the PL330 core */
struct pl330_info {
	/* Owning device */
	struct device *dev;
	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem *base;
	/* Client can freely use it. */
	void *client_data;
	/* PL330 core data, Client must not touch it. */
	void *pl330_data;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config pcfg;
	/*
	 * If the DMAC has some reset mechanism, then the
	 * client may want to provide a pointer to the method.
	 */
	void (*dmac_reset)(struct pl330_info *pi);
};
/*
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_dstcachectrl dcctl;
	enum pl330_srccachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};
/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
	/*
	 * Pointer to next xfer in the list.
	 * The last xfer in the req must point to NULL.
	 */
	struct pl330_xfer *next;
};
/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All the xfers in the request were successful. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};
/* A request defining Scatter-Gather List ending with NULL xfer. */
struct pl330_req {
	enum dma_transfer_direction rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Unique token for this xfer, set by the client. */
	void *token;
	/* Callback to be called after xfer. */
	void (*xfer_cb)(void *token, enum pl330_op_err err);
	/* If NULL, req will be done at last set parameters. */
	struct pl330_reqcfg *cfg;
	/* Pointer to first xfer in the request. */
	struct pl330_xfer *x;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};
/*
 * To know the status of the channel and DMAC, the client
 * provides a pointer to this structure. The PL330 core
 * fills it with current information.
 */
struct pl330_chanstatus {
	/*
	 * If the DMAC engine halted due to some error,
	 * the client should remove and re-add the DMAC.
	 */
	bool dmac_halted;
	/*
	 * If channel is halted due to some error,
	 * the client should ABORT/FLUSH and START the channel.
	 */
	bool faulting;
	/* Location of last load */
	u32 src_addr;
	/* Location of last store */
	u32 dst_addr;
	/*
	 * Pointer to the currently active req, NULL if channel is
	 * inactive, even though the requests may be present.
	 */
	struct pl330_req *top_req;
	/* Pointer to req waiting second in the queue if any. */
	struct pl330_req *wait_req;
};
enum pl330_chan_op {
	/* Start the channel */
	PL330_OP_START,
	/* Abort the active xfer */
	PL330_OP_ABORT,
	/* Stop xfer and flush queue */
	PL330_OP_FLUSH,
};
struct _xfer_spec {
	u32 ccr;
	struct pl330_req *r;
	struct pl330_xfer *x;
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_dst {
	SRC = 0,
	DST,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	/* Number of bytes taken to setup MC for the req */
	u32 mc_len;
	struct pl330_req *r;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};

enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};
struct pl330_dmac {
	spinlock_t lock;
	/* Holds list of reqs with due callbacks */
	struct list_head req_done;
	/* Pointer to platform specific stuff */
	struct pl330_info *pinfo;
	/* Maximum possible events/irqs */
	int events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t mcode_bus;
	/* CPU address of MicroCode buffer */
	void *mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread *channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread *manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct tasks;
	struct _pl330_tbd dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state state;
};
enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};
struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of submitted descriptors */
	struct list_head submitted_list;
	/* List of issued descriptors */
	struct list_head work_list;
	/* List of completed descriptors */
	struct list_head completed_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};
struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Holds info about sg limitations */
	struct device_dma_parameters dma_parms;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
};
struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};
static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
{
	if (r && r->xfer_cb)
		r->xfer_cb(r->token, err);
}
static inline bool _queue_empty(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
		? true : false;
}

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
		? false : true;
}
static inline bool is_manager(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* MANAGER is indexed at the end */
	if (thrd->id == pl330->pinfo->pcfg.num_chan)
		return true;
	else
		return false;
}
/* Whether the manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
}
static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}
static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)
{
	if (dry_run)
		return SZ_DMAADDH;

	buf[0] = CMD_DMAADDH;
	buf[0] |= (da << 1);
	*((u16 *)&buf[1]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);

	return SZ_DMAADDH;
}
static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}
static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}
static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}
static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}
static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}
struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};
static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}
static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}
static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	*((u32 *)&buf[2]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}
static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMANOP;

	buf[0] = CMD_DMANOP;

	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");

	return SZ_DMANOP;
}
static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}
static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}
static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}
static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}
static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMASTZ;

	buf[0] = CMD_DMASTZ;

	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");

	return SZ_DMASTZ;
}
static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		unsigned invalidate)
{
	if (dry_run)
		return SZ_DMAWFE;

	buf[0] = CMD_DMAWFE;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	if (invalidate)
		buf[1] |= (1 << 1);

	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
		ev >> 3, invalidate ? ", I" : "");

	return SZ_DMAWFE;
}
static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}
static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}
struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);

	buf[1] = chan & 0x7;

	*((u32 *)&buf[2]) = addr;

	return SZ_DMAGO;
}
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
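
/*
 * Illustrative only: with HZ = 100 and loops_per_jiffy = 500000
 * (about 50M loops/sec), msecs_to_loops(5) = 500000 / 1000 * 100 * 5
 * = 250000 busy-loop iterations for the 5ms wait in _until_dmac_idle().
 */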
/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	if (!loops)
		return true;

	return false;
}
static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	val = *((u32 *)&insn[2]);
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
		return;
	}

	writel(0, regs + DBGCMD);
}
/*
 * Mark a _pl330_req as free.
 * We do it by writing DMAEND as the first instruction
 * because no valid request is going to have DMAEND as
 * its first instruction to execute.
 */
static void mark_free(struct pl330_thread *thrd, int idx)
{
	struct _pl330_req *req = &thrd->req[idx];

	_emit_END(0, req->mc_cpu);
	req->mc_len = 0;

	thrd->req_running = -1;
}
static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}
static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
}
/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	struct _pl330_req *req;
	struct pl330_req *r;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (!IS_FREE(&thrd->req[idx]))
		req = &thrd->req[idx];
	else {
		idx = thrd->lstenq;
		if (!IS_FREE(&thrd->req[idx]))
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req || !req->r)
		return true;

	r = req->r;

	if (r->cfg)
		ns = r->cfg->nonsecure ? 1 : 0;
	else if (readl(regs + CS(thrd->id)) & CS_CNS)
		ns = 1;
	else
		ns = 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}
static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_FAULTING:
		_stop(thrd);

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}
static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->r->cfg->pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}
static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}
static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}
static int _bursts(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	switch (pxs->r->rqtype) {
	case DMA_MEM_TO_DEV:
		off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_DEV_TO_MEM:
		off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_MEM_TO_MEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;
	default:
		off += 0x40000000; /* Scare off the Client */
		break;
	}

	return off;
}
/* Returns bytes consumed and updates bursts */
static inline int _loop(unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
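
/*
 * Worked sizing example for the cycmax limit above (a sketch, assuming
 * r1p0+ MEM_TO_MEM, where one unrolled burst is DMALD + DMAST = 2
 * bytes): with both loop counters in use, szlp and szlpend double to 4
 * each, so cycmax = (255 - 8) / 2 = 123 load/store pairs can be
 * unrolled before DMALPEND's 8-bit backward jump can no longer reach
 * the loop head.
 */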
static inline int _setup_loops(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}

	return off;
}
static inline int _setup_xfer(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(dry_run, &buf[off], pxs);

	return off;
}
/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
		unsigned index, struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	x = pxs->r->x;
	do {
		/* Error if xfer length is not aligned at burst size */
		if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
			return -EINVAL;

		pxs->x = x;
		off += _setup_xfer(dry_run, &buf[off], pxs);

		x = x->next;
	} while (x);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}
static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
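
/*
 * Worked CCR example (illustrative values, not from the TRM):
 * incrementing source and destination, 4-byte beats (brst_size = 2)
 * and 16-beat bursts (brst_len = 16) give
 *	ccr = CC_SRCINC | CC_DSTINC
 *	    | (0xf << CC_SRCBRSTLEN_SHFT) | (0xf << CC_DSTBRSTLEN_SHFT)
 *	    | (2 << CC_SRCBRSTSIZE_SHFT) | (2 << CC_DSTBRSTSIZE_SHFT)
 *	    = 0x003d40f5.
 */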
static inline bool _is_valid(u32 ccr)
{
	enum pl330_dstcachectrl dcctl;
	enum pl330_srccachectrl scctl;

	dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DSTCCTRL_MASK;
	scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;

	if (dcctl == DINVALID1 || dcctl == DINVALID2
			|| scctl == SINVALID1 || scctl == SINVALID2)
		return false;
	else
		return true;
}
/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(void *ch_id, struct pl330_req *r)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	struct pl330_info *pi;
	struct _xfer_spec xs;
	unsigned long flags;
	void __iomem *regs;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	/* No Req or Unacquired Channel or DMAC */
	if (!r || !thrd || thrd->free)
		return -EINVAL;

	pl330 = thrd->dmac;
	pi = pl330->pinfo;
	regs = pi->base;

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (r->rqtype != DMA_MEM_TO_MEM && r->peri >= pi->pcfg.num_peri) {
		dev_info(thrd->dmac->pinfo->dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, r->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Use last settings, if not provided */
	if (r->cfg) {
		/* Prefer Secure Channel */
		if (!_manager_ns(thrd))
			r->cfg->nonsecure = 0;
		else
			r->cfg->nonsecure = 1;

		ccr = _prepare_ccr(r->cfg);
	} else {
		ccr = readl(regs + CC(thrd->id));
	}

	/* If this req doesn't have valid xfer settings */
	if (!_is_valid(ccr)) {
		ret = -EINVAL;
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
			__func__, __LINE__, ccr);
		goto xfer_exit;
	}

	idx = IS_FREE(&thrd->req[0]) ? 0 : 1;

	xs.ccr = ccr;
	xs.r = r;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(1, thrd, idx, &xs);
	if (ret < 0)
		goto xfer_exit;

	if (ret > pi->mcbufsz / 2) {
		dev_info(thrd->dmac->pinfo->dev,
			"%s:%d Try increasing mcbufsz\n",
				__func__, __LINE__);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
	thrd->req[idx].r = r;

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}
static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	struct pl330_info *pi = pl330->pinfo;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself has gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pi->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pi->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);

			_callback(thrd->req[1 - thrd->lstenq].r, err);
			_callback(thrd->req[thrd->lstenq].r, err);

			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].r = NULL;
			thrd->req[1].r = NULL;
			mark_free(thrd, 0);
			mark_free(thrd, 1);

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(const struct pl330_info *pi)
{
	struct pl330_req *rqdone, *tmp;
	struct pl330_dmac *pl330;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	if (!pi || !pi->pl330_data)
		return 0;

	regs = pi->base;
	pl330 = pi->pl330_data;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pi->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pi->dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened, i.e., which thread notified */
	val = readl(regs + ES);
	if (pi->pcfg.num_events < 32
			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the req */
			rqdone = thrd->req[active].r;
			thrd->req[active].r = NULL;

			mark_free(thrd, active);

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&rqdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
		list_del(&rqdone->rqd);

		spin_unlock_irqrestore(&pl330->lock, flags);
		_callback(rqdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}
static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int ret = 0, active;

	if (!thrd || thrd->free || thrd->dmac->state == DYING)
		return -EINVAL;

	pl330 = thrd->dmac;
	active = thrd->req_running;

	spin_lock_irqsave(&pl330->lock, flags);

	switch (op) {
	case PL330_OP_FLUSH:
		/* Make sure the channel is stopped */
		_stop(thrd);

		thrd->req[0].r = NULL;
		thrd->req[1].r = NULL;
		mark_free(thrd, 0);
		mark_free(thrd, 1);
		break;

	case PL330_OP_ABORT:
		/* Make sure the channel is stopped */
		_stop(thrd);

		/* ABORT is only for the active req */
		if (active == -1)
			break;

		thrd->req[active].r = NULL;
		mark_free(thrd, active);

		/* Start the next */
	case PL330_OP_START:
		if ((active == -1) && !_start(thrd))
			ret = -EIO;
		break;

	default:
		ret = -EINVAL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
	return ret;
}
/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;
	int ev;

	for (ev = 0; ev < pi->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}
static bool _chan_ns(const struct pl330_info *pi, int i)
{
	return pi->pcfg.irq_ns & (1 << i);
}
/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static void *pl330_request_channel(const struct pl330_info *pi)
{
	struct pl330_thread *thrd = NULL;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int chans, i;

	if (!pi || !pi->pl330_data)
		return NULL;

	pl330 = pi->pl330_data;

	if (pl330->state == DYING)
		return NULL;

	chans = pi->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pi, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].r = NULL;
				mark_free(thrd, 0);
				thrd->req[1].r = NULL;
				mark_free(thrd, 1);
				break;
			}
		}
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}
/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pi->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}
static void pl330_release_channel(void *ch_id)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	_callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
	_callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Initialize the structure for PL330 configuration, which can be used
 * by the client driver to make the best use of the DMAC
 */
static void read_dmac_config(struct pl330_info *pi)
{
	void __iomem *regs = pi->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pi->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pi->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pi->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pi->pcfg.num_peri = val;
		pi->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pi->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pi->pcfg.mode |= DMAC_MODE_NS;
	else
		pi->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pi->pcfg.num_events = val;

	pi->pcfg.irq_ns = readl(regs + CR3);
}
static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].r = NULL;
	mark_free(thrd, 0);

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pi->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pi->mcbufsz / 2;
	thrd->req[1].r = NULL;
	mark_free(thrd, 1);
}
static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)
		return -ENOMEM;

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		thrd->id = i;
		thrd->dmac = pl330;
		_reset_thread(thrd);
		thrd->free = true;
	}

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];
	thrd->id = chans;
	thrd->dmac = pl330;
	thrd->free = false;
	pl330->manager = thrd;

	return 0;
}
static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * pi->mcbufsz).
	 */
	pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
				chans * pi->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL);
	if (!pl330->mcode_cpu) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_coherent(pi->dev,
				chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
		return ret;
	}

	return 0;
}
static int pl330_add(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;
	int i, ret;

	if (!pi || !pi->dev)
		return -EINVAL;

	/* If already added */
	if (pi->pl330_data)
		return -EINVAL;

	/*
	 * If the SoC can perform reset on the DMAC, then do it
	 * before reading its configuration.
	 */
	if (pi->dmac_reset)
		pi->dmac_reset(pi);

	/* Check if we can handle this DMAC */
	if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
		dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id);
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pi);

	if (pi->pcfg.num_events == 0) {
		dev_err(pi->dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
	if (!pl330) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	/* Assign the info structure and private data */
	pl330->pinfo = pi;
	pi->pl330_data = pl330;

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pi->mcbufsz)
		pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pi->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pi->dev, "Unable to create channels for DMAC\n");
		kfree(pl330);
		return ret;
	}

	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

	return 0;
}
static int dmac_free_threads(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Release Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		pl330_release_channel((void *)thrd);
	}

	kfree(pl330->channels);

	return 0;
}
static void dmac_free_resources(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;

	dmac_free_threads(pl330);

	dma_free_coherent(pi->dev, chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
}
static void pl330_del(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;

	if (!pi || !pi->pl330_data)
		return;

	pl330 = pi->pl330_data;

	pl330->state = UNINIT;

	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_resources(pl330);

	kfree(pl330);
	pi->pl330_data = NULL;
}
/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			continue;

		ret = pl330_submit_req(pch->pl330_chid,
						&desc->req);
		if (!ret) {
			desc->status = BUSY;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			if (!pch->cyclic)
				dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &pch->completed_list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	while (!list_empty(&pch->completed_list)) {
		dma_async_tx_callback callback;
		void *callback_param;

		desc = list_first_entry(&pch->completed_list,
					struct dma_pl330_desc, node);

		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;

		if (pch->cyclic) {
			desc->status = PREP;
			list_move_tail(&desc->node, &pch->work_list);
		} else {
			desc->status = FREE;
			list_move_tail(&desc->node, &pch->dmac->desc_pool);
		}

		dma_descriptor_unmap(&desc->txd);

		if (callback) {
			spin_unlock_irqrestore(&pch->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&pch->lock, flags);
		}
	}
	spin_unlock_irqrestore(&pch->lock, flags);
}
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}
bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

	peri_id = chan->private;
	return *peri_id == (unsigned long)param;
}
EXPORT_SYMBOL(pl330_filter);
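
/*
 * Typical (illustrative) client usage of the filter above, where
 * peri_id stands in for a board-specific peripheral request line:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter,
 *				   (void *)(unsigned long)peri_id);
 */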
static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
	unsigned int chan_id;

	if (count != 1)
		return NULL;

	chan_id = dma_spec->args[0];
	if (chan_id >= pdmac->num_peripherals)
		return NULL;

	return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan);
}
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return -ENOMEM;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry(desc, &pch->submitted_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_for_each_entry(desc, &pch->work_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_for_each_entry(desc, &pch->completed_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
		list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
		list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Unsupported command.\n");
		return -ENXIO;
	}

	return 0;
}
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	tasklet_kill(&pch->task);

	spin_lock_irqsave(&pch->lock, flags);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}
static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static void pl330_issue_pending(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long)pch);
}
/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
		if (pch->cyclic) {
			desc->txd.callback = last->txd.callback;
			desc->txd.callback_param = last->txd.callback_param;
		}

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->submitted_list);
	}

	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->submitted_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}
static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}
/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kcalloc(count, sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}
static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;
	desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}
static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
	px->next = NULL;
}
static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicating this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}
/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
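
/*
 * Worked example with illustrative hardware values: a 64-bit bus
 * (data_bus_width = 64) and a 16-line data buffer (data_buf_dep = 16)
 * give burst_len = 8 * 16 = 128 beats, scaled to 128 >> 2 = 32 for
 * 4-byte beats, clamped to the architectural maximum of 16, then
 * reduced until it divides the transfer length evenly.
 */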
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dma_pl330_desc *desc = NULL, *first = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned int i;
	dma_addr_t dst;
	dma_addr_t src;

	if (len % period_len != 0)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
		__func__, __LINE__);
		return NULL;
	}

	for (i = 0; i < len / period_len; i++) {
		desc = pl330_get_desc(pch);
		if (!desc) {
			dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
				__func__, __LINE__);

			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			src = dma_addr;
			dst = pch->fifo_addr;
			break;
		case DMA_DEV_TO_MEM:
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			src = pch->fifo_addr;
			dst = dma_addr;
			break;
		default:
			break;
		}

		desc->req.rqtype = direction;
		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
		fill_px(&desc->px, dst, src, period_len);

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		dma_addr += period_len;
	}

	if (!desc)
		return NULL;

	pch->cyclic = true;
	desc->txd.flags = flags;

	return &desc->txd;
}
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->req.rqtype = DMA_MEM_TO_MEM;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}
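
/*
 * Illustrative walk-through of the burst selection above: on a 32-bit
 * bus (data_bus_width = 32), an 8-byte copy starts with burst = 4,
 * which already divides len, so brst_size settles at 2 (1 << 2 == 4)
 * and get_burst_len() then picks the longest burst length that still
 * divides the transfer evenly.
 */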
static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
		struct dma_pl330_desc *first)
{
	unsigned long flags;
	struct dma_pl330_desc *desc;

	if (!first)
		return;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	while (!list_empty(&first->node)) {
		desc = list_entry(first->node.next,
				struct dma_pl330_desc, node);
		list_move_tail(&desc->node, &pdmac->desc_pool);
	}

	list_move_tail(&first->node, &pdmac->desc_pool);

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg, void *context)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			__pl330_giveback_desc(pdmac, first);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
		desc->req.rqtype = direction;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
#define PL330_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
	struct dma_slave_caps *caps)
{
	caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
	caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = false;
	caps->cmd_terminate = true;
	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	return 0;
}
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = dev_get_platdata(&adev->dev);

	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Allocate a new DMAC and its Channels */
	pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	pi->base = devm_ioremap_resource(&adev->dev, res);
	if (IS_ERR(pi->base))
		return PTR_ERR(pi->base);

	amba_set_drvdata(adev, pdmac);

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq) {
			ret = devm_request_irq(&adev->dev, irq,
					       pl330_irq_handler, 0,
					       dev_name(&adev->dev), pi);
			if (ret)
				return ret;
		} else {
			break;
		}
	}

	pi->pcfg.periph_id = adev->periphid;
	ret = pl330_add(pi);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	if (pdat)
		num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
	else
		num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);

	pdmac->num_peripherals = num_chan;

	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
		goto probe_err2;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->submitted_list);
		INIT_LIST_HEAD(&pch->work_list);
		INIT_LIST_HEAD(&pch->completed_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pi->pcfg.num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
			dma_cap_set(DMA_PRIVATE, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;
	pd->device_slave_caps = pl330_dma_device_slave_caps;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err3;
	}

	if (adev->dev.of_node) {
		ret = of_dma_controller_register(adev->dev.of_node,
					 of_dma_pl330_xlate, pdmac);
		if (ret) {
			dev_err(&adev->dev,
			"unable to register DMA to the generic DT DMA helpers\n");
		}
	}
	adev->dev.dma_parms = &pdmac->dma_parms;

	/*
	 * This is the limit for transfers with a buswidth of 1, larger
	 * buswidths will have larger limits.
	 */
	ret = dma_set_max_seg_size(&adev->dev, 1900800);
	if (ret)
		dev_err(&adev->dev, "unable to set the seg size\n");

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;
probe_err3:
	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}
probe_err2:
	pl330_del(pi);

	return ret;
}
static int pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;

	if (!pdmac)
		return 0;

	if (adev->dev.of_node)
		of_dma_controller_free(adev->dev.of_node);

	dma_async_device_unregister(&pdmac->ddma);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	return 0;
}
static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);
static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

module_amba_driver(pl330_driver);
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");