2 * Applied Micro X-Gene SoC DMA engine Driver
4 * Copyright (c) 2015, Applied Micro Circuits Corporation
5 * Authors: Rameshwar Prasad Sahu <rsahu@apm.com>
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 * NOTE: PM support is currently not available.
24 #include <linux/acpi.h>
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/dmaengine.h>
29 #include <linux/dmapool.h>
30 #include <linux/interrupt.h>
32 #include <linux/module.h>
33 #include <linux/of_device.h>
35 #include "dmaengine.h"
37 /* X-Gene DMA ring csr registers and bit definitions */
38 #define XGENE_DMA_RING_CONFIG 0x04
39 #define XGENE_DMA_RING_ENABLE BIT(31)
40 #define XGENE_DMA_RING_ID 0x08
41 #define XGENE_DMA_RING_ID_SETUP(v) ((v) | BIT(31))
42 #define XGENE_DMA_RING_ID_BUF 0x0C
43 #define XGENE_DMA_RING_ID_BUF_SETUP(v) (((v) << 9) | BIT(21))
44 #define XGENE_DMA_RING_THRESLD0_SET1 0x30
45 #define XGENE_DMA_RING_THRESLD0_SET1_VAL 0x64
46 #define XGENE_DMA_RING_THRESLD1_SET1 0x34
47 #define XGENE_DMA_RING_THRESLD1_SET1_VAL 0xC8
48 #define XGENE_DMA_RING_HYSTERESIS 0x68
49 #define XGENE_DMA_RING_HYSTERESIS_VAL 0xFFFFFFFF
50 #define XGENE_DMA_RING_STATE 0x6C
51 #define XGENE_DMA_RING_STATE_WR_BASE 0x70
52 #define XGENE_DMA_RING_NE_INT_MODE 0x017C
53 #define XGENE_DMA_RING_NE_INT_MODE_SET(m, v) \
54 ((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
55 #define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v) \
56 ((m) &= (~BIT(31 - (v))))
57 #define XGENE_DMA_RING_CLKEN 0xC208
58 #define XGENE_DMA_RING_SRST 0xC200
59 #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070
60 #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074
61 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF
62 #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num))
63 #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v))
64 #define XGENE_DMA_RING_CMD_OFFSET 0x2C
65 #define XGENE_DMA_RING_CMD_BASE_OFFSET(v) ((v) << 6)
66 #define XGENE_DMA_RING_COHERENT_SET(m) \
67 (((u32 *)(m))[2] |= BIT(4))
68 #define XGENE_DMA_RING_ADDRL_SET(m, v) \
69 (((u32 *)(m))[2] |= (((v) >> 8) << 5))
70 #define XGENE_DMA_RING_ADDRH_SET(m, v) \
71 (((u32 *)(m))[3] |= ((v) >> 35))
72 #define XGENE_DMA_RING_ACCEPTLERR_SET(m) \
73 (((u32 *)(m))[3] |= BIT(19))
74 #define XGENE_DMA_RING_SIZE_SET(m, v) \
75 (((u32 *)(m))[3] |= ((v) << 23))
76 #define XGENE_DMA_RING_RECOMBBUF_SET(m) \
77 (((u32 *)(m))[3] |= BIT(27))
78 #define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m) \
79 (((u32 *)(m))[3] |= (0x7 << 28))
80 #define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m) \
81 (((u32 *)(m))[4] |= 0x3)
82 #define XGENE_DMA_RING_SELTHRSH_SET(m) \
83 (((u32 *)(m))[4] |= BIT(3))
84 #define XGENE_DMA_RING_TYPE_SET(m, v) \
85 (((u32 *)(m))[4] |= ((v) << 19))
87 /* X-Gene DMA device csr registers and bit definitions */
88 #define XGENE_DMA_IPBRR 0x0
89 #define XGENE_DMA_DEV_ID_RD(v) ((v) & 0x00000FFF)
90 #define XGENE_DMA_BUS_ID_RD(v) (((v) >> 12) & 3)
91 #define XGENE_DMA_REV_NO_RD(v) (((v) >> 14) & 3)
92 #define XGENE_DMA_GCR 0x10
93 #define XGENE_DMA_CH_SETUP(v) \
94 ((v) = ((v) & ~0x000FFFFF) | 0x000AAFFF)
95 #define XGENE_DMA_ENABLE(v) ((v) |= BIT(31))
96 #define XGENE_DMA_DISABLE(v) ((v) &= ~BIT(31))
97 #define XGENE_DMA_RAID6_CONT 0x14
98 #define XGENE_DMA_RAID6_MULTI_CTRL(v) ((v) << 24)
99 #define XGENE_DMA_INT 0x70
100 #define XGENE_DMA_INT_MASK 0x74
101 #define XGENE_DMA_INT_ALL_MASK 0xFFFFFFFF
102 #define XGENE_DMA_INT_ALL_UNMASK 0x0
103 #define XGENE_DMA_INT_MASK_SHIFT 0x14
104 #define XGENE_DMA_RING_INT0_MASK 0x90A0
105 #define XGENE_DMA_RING_INT1_MASK 0x90A8
106 #define XGENE_DMA_RING_INT2_MASK 0x90B0
107 #define XGENE_DMA_RING_INT3_MASK 0x90B8
108 #define XGENE_DMA_RING_INT4_MASK 0x90C0
109 #define XGENE_DMA_CFG_RING_WQ_ASSOC 0x90E0
110 #define XGENE_DMA_ASSOC_RING_MNGR1 0xFFFFFFFF
111 #define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070
112 #define XGENE_DMA_BLK_MEM_RDY 0xD074
113 #define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF
114 #define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000
116 /* X-Gene SoC EFUSE csr register and bit definition */
117 #define XGENE_SOC_JTAG1_SHADOW 0x18
118 #define XGENE_DMA_PQ_DISABLE_MASK BIT(13)
120 /* X-Gene DMA Descriptor format */
121 #define XGENE_DMA_DESC_NV_BIT BIT_ULL(50)
122 #define XGENE_DMA_DESC_IN_BIT BIT_ULL(55)
123 #define XGENE_DMA_DESC_C_BIT BIT_ULL(63)
124 #define XGENE_DMA_DESC_DR_BIT BIT_ULL(61)
125 #define XGENE_DMA_DESC_ELERR_POS 46
126 #define XGENE_DMA_DESC_RTYPE_POS 56
127 #define XGENE_DMA_DESC_LERR_POS 60
128 #define XGENE_DMA_DESC_BUFLEN_POS 48
129 #define XGENE_DMA_DESC_HOENQ_NUM_POS 48
130 #define XGENE_DMA_DESC_ELERR_RD(m) \
131 (((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
132 #define XGENE_DMA_DESC_LERR_RD(m) \
133 (((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7)
134 #define XGENE_DMA_DESC_STATUS(elerr, lerr) \
135 (((elerr) << 4) | (lerr))
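/*
 * The 2-bit extended-error and 3-bit local-error fields read back from a
 * completed Rx descriptor are combined into a single status value that
 * indexes xgene_dma_desc_err[] below; e.g. elerr = 0x1 and lerr = 0x1 give
 * XGENE_DMA_DESC_STATUS(0x1, 0x1) = 0x11, which is ERR_CRC_ERR ("CRC error").
 */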
137 /* X-Gene DMA descriptor empty s/w signature */
138 #define XGENE_DMA_DESC_EMPTY_SIGNATURE ~0ULL
140 /* X-Gene DMA configurable parameters defines */
141 #define XGENE_DMA_RING_NUM 512
142 #define XGENE_DMA_BUFNUM 0x0
143 #define XGENE_DMA_CPU_BUFNUM 0x18
144 #define XGENE_DMA_RING_OWNER_DMA 0x03
145 #define XGENE_DMA_RING_OWNER_CPU 0x0F
146 #define XGENE_DMA_RING_TYPE_REGULAR 0x01
147 #define XGENE_DMA_RING_WQ_DESC_SIZE 32 /* 32 Bytes */
148 #define XGENE_DMA_RING_NUM_CONFIG 5
149 #define XGENE_DMA_MAX_CHANNEL 4
150 #define XGENE_DMA_XOR_CHANNEL 0
151 #define XGENE_DMA_PQ_CHANNEL 1
152 #define XGENE_DMA_MAX_BYTE_CNT 0x4000 /* 16 KB */
153 #define XGENE_DMA_MAX_64B_DESC_BYTE_CNT 0x14000 /* 80 KB */
154 #define XGENE_DMA_MAX_XOR_SRC 5
155 #define XGENE_DMA_16K_BUFFER_LEN_CODE 0x0
156 #define XGENE_DMA_INVALID_LEN_CODE 0x7800000000000000ULL
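/*
 * XGENE_DMA_INVALID_LEN_CODE is written by xgene_dma_invalidate_buffer()
 * into any unused source-address slots of a 64B (extended) descriptor to
 * mark them as carrying no valid buffer.
 */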
158 /* X-Gene DMA descriptor error codes */
159 #define ERR_DESC_AXI 0x01
160 #define ERR_BAD_DESC 0x02
161 #define ERR_READ_DATA_AXI 0x03
162 #define ERR_WRITE_DATA_AXI 0x04
163 #define ERR_FBP_TIMEOUT 0x05
165 #define ERR_DIFF_SIZE 0x08
166 #define ERR_SCT_GAT_LEN 0x09
167 #define ERR_CRC_ERR 0x11
168 #define ERR_CHKSUM 0x12
171 /* X-Gene DMA error interrupt codes */
172 #define ERR_DIF_SIZE_INT 0x0
173 #define ERR_GS_ERR_INT 0x1
174 #define ERR_FPB_TIMEO_INT 0x2
175 #define ERR_WFIFO_OVF_INT 0x3
176 #define ERR_RFIFO_OVF_INT 0x4
177 #define ERR_WR_TIMEO_INT 0x5
178 #define ERR_RD_TIMEO_INT 0x6
179 #define ERR_WR_ERR_INT 0x7
180 #define ERR_RD_ERR_INT 0x8
181 #define ERR_BAD_DESC_INT 0x9
182 #define ERR_DESC_DST_INT 0xA
183 #define ERR_DESC_SRC_INT 0xB
185 /* X-Gene DMA flyby operation code */
186 #define FLYBY_2SRC_XOR 0x80
187 #define FLYBY_3SRC_XOR 0x90
188 #define FLYBY_4SRC_XOR 0xA0
189 #define FLYBY_5SRC_XOR 0xB0
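/*
 * The flyby opcode, OR'd into the first descriptor's m2 word by
 * xgene_dma_prep_xor_desc(), selects how many source buffers the engine
 * XORs in a single pass; xgene_dma_encode_xor_flyby() maps a source count
 * of 2 to 5 onto the opcodes above.
 */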
191 /* X-Gene DMA SW descriptor flags */
192 #define XGENE_DMA_FLAG_64B_DESC BIT(0)
194 /* Define to dump X-Gene DMA descriptor */
195 #define XGENE_DMA_DESC_DUMP(desc, m) \
196 print_hex_dump(KERN_ERR, (m), \
197 DUMP_PREFIX_ADDRESS, 16, 8, (desc), 32, 0)
199 #define to_dma_desc_sw(tx) \
200 container_of(tx, struct xgene_dma_desc_sw, tx)
201 #define to_dma_chan(dchan) \
202 container_of(dchan, struct xgene_dma_chan, dma_chan)
204 #define chan_dbg(chan, fmt, arg...) \
205 dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
206 #define chan_err(chan, fmt, arg...) \
207 dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
209 struct xgene_dma_desc_hw {
216 enum xgene_dma_ring_cfgsize {
217 XGENE_DMA_RING_CFG_SIZE_512B,
218 XGENE_DMA_RING_CFG_SIZE_2KB,
219 XGENE_DMA_RING_CFG_SIZE_16KB,
220 XGENE_DMA_RING_CFG_SIZE_64KB,
221 XGENE_DMA_RING_CFG_SIZE_512KB,
222 XGENE_DMA_RING_CFG_SIZE_INVALID
225 struct xgene_dma_ring {
226 struct xgene_dma *pdma;
236 void __iomem *cmd_base;
237 dma_addr_t desc_paddr;
238 u32 state[XGENE_DMA_RING_NUM_CONFIG];
239 enum xgene_dma_ring_cfgsize cfgsize;
242 struct xgene_dma_desc_hw *desc_hw;
246 struct xgene_dma_desc_sw {
247 struct xgene_dma_desc_hw desc1;
248 struct xgene_dma_desc_hw desc2;
250 struct list_head node;
251 struct list_head tx_list;
252 struct dma_async_tx_descriptor tx;
256 * struct xgene_dma_chan - internal representation of an X-Gene DMA channel
257 * @dma_chan: dmaengine channel object member
258 * @pdma: X-Gene DMA device structure reference
259 * @dev: struct device reference for dma mapping api
260 * @id: raw id of this channel
261 * @rx_irq: channel IRQ
262 * @name: name of X-Gene DMA channel
263 * @lock: serializes enqueue/dequeue operations to the descriptor pool
264 * @pending: number of transaction requests pushed to the DMA controller for
265 * execution, but still waiting for completion
266 * @max_outstanding: maximum number of outstanding requests we can push to the channel
267 * @ld_pending: descriptors which are queued to run, but have not yet been
268 * submitted to the hardware for execution
269 * @ld_running: descriptors which are currently being executed by the hardware
270 * @ld_completed: descriptors which have finished execution by the hardware.
271 * These descriptors have already had their cleanup actions run. They
272 * are waiting for the ACK bit to be set by the async tx API.
273 * @desc_pool: descriptor pool for DMA operations
274 * @tasklet: bottom half where all completed descriptors are cleaned up
275 * @tx_ring: transmit ring descriptor that we use to prepare actual
276 * descriptors for execution
277 * @rx_ring: receive ring descriptor that we use to get completed DMA
278 * descriptors during cleanup time
280 struct xgene_dma_chan {
281 struct dma_chan dma_chan;
282 struct xgene_dma *pdma;
290 struct list_head ld_pending;
291 struct list_head ld_running;
292 struct list_head ld_completed;
293 struct dma_pool *desc_pool;
294 struct tasklet_struct tasklet;
295 struct xgene_dma_ring tx_ring;
296 struct xgene_dma_ring rx_ring;
300 * struct xgene_dma - internal representation of an X-Gene DMA device
301 * @err_irq: DMA error irq number
302 * @ring_num: start id number for DMA ring
303 * @csr_dma: base for DMA register access
304 * @csr_ring: base for DMA ring register access
305 * @csr_ring_cmd: base for DMA ring command register access
306 * @csr_efuse: base for efuse register access
307 * @dma_dev: embedded struct dma_device
308 * @chan: reference to X-Gene DMA channels
315 void __iomem *csr_dma;
316 void __iomem *csr_ring;
317 void __iomem *csr_ring_cmd;
318 void __iomem *csr_efuse;
319 struct dma_device dma_dev[XGENE_DMA_MAX_CHANNEL];
320 struct xgene_dma_chan chan[XGENE_DMA_MAX_CHANNEL];
323 static const char * const xgene_dma_desc_err[] = {
324 [ERR_DESC_AXI] = "AXI error when reading src/dst link list",
325 [ERR_BAD_DESC] = "ERR or El_ERR fields not set to zero in desc",
326 [ERR_READ_DATA_AXI] = "AXI error when reading data",
327 [ERR_WRITE_DATA_AXI] = "AXI error when writing data",
328 [ERR_FBP_TIMEOUT] = "Timeout on bufpool fetch",
329 [ERR_ECC] = "ECC double bit error",
330 [ERR_DIFF_SIZE] = "Bufpool too small to hold all the DIF result",
331 [ERR_SCT_GAT_LEN] = "Gather and scatter data length not same",
332 [ERR_CRC_ERR] = "CRC error",
333 [ERR_CHKSUM] = "Checksum error",
334 [ERR_DIF] = "DIF error",
337 static const char * const xgene_dma_err[] = {
338 [ERR_DIF_SIZE_INT] = "DIF size error",
339 [ERR_GS_ERR_INT] = "Gather scatter not same size error",
340 [ERR_FPB_TIMEO_INT] = "Free pool timeout error",
341 [ERR_WFIFO_OVF_INT] = "Write FIFO overflow error",
342 [ERR_RFIFO_OVF_INT] = "Read FIFO overflow error",
343 [ERR_WR_TIMEO_INT] = "Write timeout error",
344 [ERR_RD_TIMEO_INT] = "Read timeout error",
345 [ERR_WR_ERR_INT] = "HBF bus write error",
346 [ERR_RD_ERR_INT] = "HBF bus read error",
347 [ERR_BAD_DESC_INT] = "Ring descriptor HE0 not set error",
348 [ERR_DESC_DST_INT] = "HFB reading dst link address error",
349 [ERR_DESC_SRC_INT] = "HFB reading src link address error",
352 static bool is_pq_enabled(struct xgene_dma *pdma)
356 val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW);
357 return !(val & XGENE_DMA_PQ_DISABLE_MASK);
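/*
 * Encode a buffer length into the descriptor BUFLEN field (bits 48 and up).
 * Lengths below 16 KB are stored directly; a full 16 KB buffer uses the
 * special 16K length code (0x0), i.e. a zero length field denotes 16 KB.
 */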
360 static u64 xgene_dma_encode_len(size_t len)
362 return (len < XGENE_DMA_MAX_BYTE_CNT) ?
363 ((u64)len << XGENE_DMA_DESC_BUFLEN_POS) :
364 XGENE_DMA_16K_BUFFER_LEN_CODE;
367 static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
369 static u8 flyby_type[] = {
370 FLYBY_2SRC_XOR, /* Dummy */
371 FLYBY_2SRC_XOR, /* Dummy */
378 return flyby_type[src_cnt];
381 static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
384 size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ?
385 *len : XGENE_DMA_MAX_BYTE_CNT;
387 *ext8 |= cpu_to_le64(*paddr);
388 *ext8 |= cpu_to_le64(xgene_dma_encode_len(nbytes));
393 static void xgene_dma_invalidate_buffer(__le64 *ext8)
395 *ext8 |= cpu_to_le64(XGENE_DMA_INVALID_LEN_CODE);
398 static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx)
410 pr_err("Invalid dma descriptor index\n");
416 static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc,
419 desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT);
420 desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA <<
421 XGENE_DMA_DESC_RTYPE_POS);
422 desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT);
423 desc->m3 |= cpu_to_le64((u64)dst_ring_num <<
424 XGENE_DMA_DESC_HOENQ_NUM_POS);
427 static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
428 struct xgene_dma_desc_sw *desc_sw,
429 dma_addr_t dst, dma_addr_t src,
432 struct xgene_dma_desc_hw *desc1, *desc2;
435 /* Get 1st descriptor */
436 desc1 = &desc_sw->desc1;
437 xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
439 /* Set destination address */
440 desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
441 desc1->m3 |= cpu_to_le64(dst);
443 /* Set 1st source address */
444 xgene_dma_set_src_buffer(&desc1->m1, &len, &src);
450 * We need to split this source buffer
451 * and use a 2nd descriptor
453 desc2 = &desc_sw->desc2;
454 desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
456 /* Set 2nd to 5th source address */
457 for (i = 0; i < 4 && len; i++)
458 xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i),
461 /* Invalidate unused source address fields */
463 xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i));
465 /* Update flag to note that we have prepared a 64B descriptor */
466 desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
469 static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
470 struct xgene_dma_desc_sw *desc_sw,
471 dma_addr_t *dst, dma_addr_t *src,
472 u32 src_cnt, size_t *nbytes,
475 struct xgene_dma_desc_hw *desc1, *desc2;
476 size_t len = *nbytes;
479 desc1 = &desc_sw->desc1;
480 desc2 = &desc_sw->desc2;
482 /* Initialize DMA descriptor */
483 xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
485 /* Set destination address */
486 desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
487 desc1->m3 |= cpu_to_le64(*dst);
489 /* We have multiple source addresses, so we need to set the NV bit */
490 desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
492 /* Set flyby opcode */
493 desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt));
495 /* Set 1st to 5th source addresses */
496 for (i = 0; i < src_cnt; i++) {
498 xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 :
499 xgene_dma_lookup_ext8(desc2, i - 1),
501 desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8)));
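/*
 * Each source's multiplier byte lands at byte position i + 1 of m2.
 * Plain XOR and P generation pass an all-0x01 coefficient array, while
 * Q generation passes the caller's Galois-field coefficients (scf).
 */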
504 /* Update metadata */
506 *dst += XGENE_DMA_MAX_BYTE_CNT;
508 /* We always need a 64B descriptor to perform XOR or PQ operations */
509 desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
512 static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx)
514 struct xgene_dma_desc_sw *desc;
515 struct xgene_dma_chan *chan;
521 chan = to_dma_chan(tx->chan);
522 desc = to_dma_desc_sw(tx);
524 spin_lock_bh(&chan->lock);
526 cookie = dma_cookie_assign(tx);
528 /* Add this transaction list onto the tail of the pending queue */
529 list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
531 spin_unlock_bh(&chan->lock);
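/*
 * Note: submitting only queues the descriptors on ld_pending; they are
 * pushed to the hw Tx ring later, from xgene_dma_issue_pending() or the
 * cleanup path, via xgene_chan_xfer_ld_pending().
 */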
536 static void xgene_dma_clean_descriptor(struct xgene_dma_chan *chan,
537 struct xgene_dma_desc_sw *desc)
539 list_del(&desc->node);
540 chan_dbg(chan, "LD %p free\n", desc);
541 dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
544 static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor(
545 struct xgene_dma_chan *chan)
547 struct xgene_dma_desc_sw *desc;
550 desc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &phys);
552 chan_err(chan, "Failed to allocate LDs\n");
556 memset(desc, 0, sizeof(*desc));
558 INIT_LIST_HEAD(&desc->tx_list);
559 desc->tx.phys = phys;
560 desc->tx.tx_submit = xgene_dma_tx_submit;
561 dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan);
563 chan_dbg(chan, "LD %p allocated\n", desc);
569 * xgene_dma_clean_completed_descriptor - free all descriptors which
570 * have been completed and acked
571 * @chan: X-Gene DMA channel
573 * This function is used on all completed and acked descriptors.
575 static void xgene_dma_clean_completed_descriptor(struct xgene_dma_chan *chan)
577 struct xgene_dma_desc_sw *desc, *_desc;
579 /* Run the callback for each descriptor, in order */
580 list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) {
581 if (async_tx_test_ack(&desc->tx))
582 xgene_dma_clean_descriptor(chan, desc);
587 * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor
588 * @chan: X-Gene DMA channel
589 * @desc: descriptor to cleanup and free
591 * This function is used on a descriptor which has been executed by the DMA
592 * controller. It will run any callbacks and submit any dependencies.
594 static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
595 struct xgene_dma_desc_sw *desc)
597 struct dma_async_tx_descriptor *tx = &desc->tx;
600 * If this is not the last transaction in the group,
601 * then there is no need to complete the cookie or run any callback,
602 * as this is not the tx descriptor which was handed back to the caller
603 * of this DMA request
609 dma_cookie_complete(tx);
611 /* Run the link descriptor callback function */
613 tx->callback(tx->callback_param);
615 dma_descriptor_unmap(tx);
617 /* Run any dependencies */
618 dma_run_dependencies(tx);
622 * xgene_dma_clean_running_descriptor - move the completed descriptor from
623 * ld_running to ld_completed
624 * @chan: X-Gene DMA channel
625 * @desc: the descriptor which is completed
627 * Free the descriptor directly if acked by async_tx api,
628 * else move it to queue ld_completed.
630 static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
631 struct xgene_dma_desc_sw *desc)
633 /* Remove from the list of running transactions */
634 list_del(&desc->node);
637 * the client is allowed to attach dependent operations
640 if (!async_tx_test_ack(&desc->tx)) {
642 * Move this descriptor to the list of descriptors which have
643 * completed, but are still awaiting the 'ack' bit to be set.
645 list_add_tail(&desc->node, &chan->ld_completed);
649 chan_dbg(chan, "LD %p free\n", desc);
650 dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
653 static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
654 struct xgene_dma_desc_sw *desc_sw)
656 struct xgene_dma_ring *ring = &chan->tx_ring;
657 struct xgene_dma_desc_hw *desc_hw;
659 /* Get hw descriptor from DMA tx ring */
660 desc_hw = &ring->desc_hw[ring->head];
663 * Increment the head count to point to the next
664 * descriptor for next time
666 if (++ring->head == ring->slots)
669 /* Copy prepared sw descriptor data to hw descriptor */
670 memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw));
673 * Check if we have prepared a 64B descriptor;
674 * in that case we need one more hw descriptor
676 if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) {
677 desc_hw = &ring->desc_hw[ring->head];
679 if (++ring->head == ring->slots)
682 memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
685 /* Increment the pending transaction count */
686 chan->pending += ((desc_sw->flags &
687 XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
689 /* Notify the hw that we have a descriptor ready for execution */
690 iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
695 * xgene_chan_xfer_ld_pending - push any pending transactions to hw
696 * @chan : X-Gene DMA channel
698 * LOCKING: must hold chan->lock
700 static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
702 struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
705 * If the list of pending descriptors is empty, then we
706 * don't need to do any work at all
708 if (list_empty(&chan->ld_pending)) {
709 chan_dbg(chan, "No pending LDs\n");
714 * Move elements from the queue of pending transactions onto the list
715 * of running transactions and push them to hw for execution
717 list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) {
719 * Check if we have pushed the maximum number of transactions
720 * the hw can take; if so, stop here and push the remaining
721 * elements from the pending ld queue after some of the
722 * descriptors we have already pushed have completed
724 if (chan->pending >= chan->max_outstanding)
727 xgene_chan_xfer_request(chan, desc_sw);
730 * Delete this element from the ld pending queue and append it to the ld running queue
733 list_move_tail(&desc_sw->node, &chan->ld_running);
738 * xgene_dma_cleanup_descriptors - cleanup link descriptors which are completed
739 * and move them to ld_completed to free until flag 'ack' is set
740 * @chan: X-Gene DMA channel
742 * This function is used on descriptors which have been executed by the DMA
743 * controller. It will run any callbacks, submit any dependencies, then
744 * free these descriptors if flag 'ack' is set.
746 static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
748 struct xgene_dma_ring *ring = &chan->rx_ring;
749 struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
750 struct xgene_dma_desc_hw *desc_hw;
751 struct list_head ld_completed;
754 INIT_LIST_HEAD(&ld_completed);
756 spin_lock_bh(&chan->lock);
758 /* Clean already completed and acked descriptors */
759 xgene_dma_clean_completed_descriptor(chan);
761 /* Move all completed descriptors to ld completed queue, in order */
762 list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
763 /* Get subsequent hw descriptor from DMA rx ring */
764 desc_hw = &ring->desc_hw[ring->head];
766 /* Check if this descriptor has been completed */
767 if (unlikely(le64_to_cpu(desc_hw->m0) ==
768 XGENE_DMA_DESC_EMPTY_SIGNATURE))
771 if (++ring->head == ring->slots)
774 /* Check if we have any error with DMA transactions */
775 status = XGENE_DMA_DESC_STATUS(
776 XGENE_DMA_DESC_ELERR_RD(le64_to_cpu(
778 XGENE_DMA_DESC_LERR_RD(le64_to_cpu(
781 /* Print the DMA error type */
782 chan_err(chan, "%s\n", xgene_dma_desc_err[status]);
785 * We have a DMA transaction error here. Dump the DMA Tx
786 * and Rx descriptors for this request */
787 XGENE_DMA_DESC_DUMP(&desc_sw->desc1,
788 "X-Gene DMA TX DESC1: ");
790 if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC)
791 XGENE_DMA_DESC_DUMP(&desc_sw->desc2,
792 "X-Gene DMA TX DESC2: ");
794 XGENE_DMA_DESC_DUMP(desc_hw,
795 "X-Gene DMA RX ERR DESC: ");
798 /* Notify the hw about this completed descriptor */
799 iowrite32(-1, ring->cmd);
801 /* Mark this hw descriptor as processed */
802 desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
805 * Decrement the pending transaction count
806 * as we have processed one
808 chan->pending -= ((desc_sw->flags &
809 XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
812 * Delete this node from ld running queue and append it to
813 * ld completed queue for further processing
815 list_move_tail(&desc_sw->node, &ld_completed);
819 * Start any pending transactions automatically.
820 * In the ideal case, we keep the DMA controller busy while we go
821 * ahead and free the descriptors below.
823 xgene_chan_xfer_ld_pending(chan);
825 spin_unlock_bh(&chan->lock);
827 /* Run the callback for each descriptor, in order */
828 list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
829 xgene_dma_run_tx_complete_actions(chan, desc_sw);
830 xgene_dma_clean_running_descriptor(chan, desc_sw);
834 static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
836 struct xgene_dma_chan *chan = to_dma_chan(dchan);
838 /* Has this channel already been allocated? */
842 chan->desc_pool = dma_pool_create(chan->name, chan->dev,
843 sizeof(struct xgene_dma_desc_sw),
845 if (!chan->desc_pool) {
846 chan_err(chan, "Failed to allocate descriptor pool\n");
850 chan_dbg(chan, "Allocate descriptor pool\n");
856 * xgene_dma_free_desc_list - Free all descriptors in a queue
857 * @chan: X-Gene DMA channel
858 * @list: the list to free
860 * LOCKING: must hold chan->lock
862 static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan,
863 struct list_head *list)
865 struct xgene_dma_desc_sw *desc, *_desc;
867 list_for_each_entry_safe(desc, _desc, list, node)
868 xgene_dma_clean_descriptor(chan, desc);
871 static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
873 struct xgene_dma_chan *chan = to_dma_chan(dchan);
875 chan_dbg(chan, "Free all resources\n");
877 if (!chan->desc_pool)
880 /* Process all running descriptors */
881 xgene_dma_cleanup_descriptors(chan);
883 spin_lock_bh(&chan->lock);
885 /* Clean all link descriptor queues */
886 xgene_dma_free_desc_list(chan, &chan->ld_pending);
887 xgene_dma_free_desc_list(chan, &chan->ld_running);
888 xgene_dma_free_desc_list(chan, &chan->ld_completed);
890 spin_unlock_bh(&chan->lock);
892 /* Delete this channel DMA pool */
893 dma_pool_destroy(chan->desc_pool);
894 chan->desc_pool = NULL;
897 static struct dma_async_tx_descriptor *xgene_dma_prep_memcpy(
898 struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
899 size_t len, unsigned long flags)
901 struct xgene_dma_desc_sw *first = NULL, *new;
902 struct xgene_dma_chan *chan;
905 if (unlikely(!dchan || !len))
908 chan = to_dma_chan(dchan);
911 /* Allocate the link descriptor from DMA pool */
912 new = xgene_dma_alloc_descriptor(chan);
916 /* Create the largest transaction possible */
917 copy = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
919 /* Prepare DMA descriptor */
920 xgene_dma_prep_cpy_desc(chan, new, dst, src, copy);
926 async_tx_ack(&new->tx);
928 /* Update metadata */
933 /* Insert the link descriptor to the LD ring */
934 list_add_tail(&new->node, &first->tx_list);
937 new->tx.flags = flags; /* client is in control of this ack */
938 new->tx.cookie = -EBUSY;
939 list_splice(&first->tx_list, &new->tx_list);
947 xgene_dma_free_desc_list(chan, &first->tx_list);
951 static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
952 struct dma_chan *dchan, struct scatterlist *dst_sg,
953 u32 dst_nents, struct scatterlist *src_sg,
954 u32 src_nents, unsigned long flags)
956 struct xgene_dma_desc_sw *first = NULL, *new = NULL;
957 struct xgene_dma_chan *chan;
958 size_t dst_avail, src_avail;
962 if (unlikely(!dchan))
965 if (unlikely(!dst_nents || !src_nents))
968 if (unlikely(!dst_sg || !src_sg))
971 chan = to_dma_chan(dchan);
973 /* Get prepared for the loop */
974 dst_avail = sg_dma_len(dst_sg);
975 src_avail = sg_dma_len(src_sg);
979 /* Run until we are out of scatterlist entries */
981 /* Create the largest transaction possible */
982 len = min_t(size_t, src_avail, dst_avail);
983 len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
987 dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
988 src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
990 /* Allocate the link descriptor from DMA pool */
991 new = xgene_dma_alloc_descriptor(chan);
995 /* Prepare DMA descriptor */
996 xgene_dma_prep_cpy_desc(chan, new, dst, src, len);
1002 async_tx_ack(&new->tx);
1004 /* update metadata */
1008 /* Insert the link descriptor to the LD ring */
1009 list_add_tail(&new->node, &first->tx_list);
1012 /* fetch the next dst scatterlist entry */
1013 if (dst_avail == 0) {
1014 /* no more entries: we're done */
1018 /* fetch the next entry: if there are no more: done */
1019 dst_sg = sg_next(dst_sg);
1024 dst_avail = sg_dma_len(dst_sg);
1027 /* fetch the next src scatterlist entry */
1028 if (src_avail == 0) {
1029 /* no more entries: we're done */
1033 /* fetch the next entry: if there are no more: done */
1034 src_sg = sg_next(src_sg);
1039 src_avail = sg_dma_len(src_sg);
1046 new->tx.flags = flags; /* client is in control of this ack */
1047 new->tx.cookie = -EBUSY;
1048 list_splice(&first->tx_list, &new->tx_list);
1055 xgene_dma_free_desc_list(chan, &first->tx_list);
1059 static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
1060 struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
1061 u32 src_cnt, size_t len, unsigned long flags)
1063 struct xgene_dma_desc_sw *first = NULL, *new;
1064 struct xgene_dma_chan *chan;
1065 static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {
1066 0x01, 0x01, 0x01, 0x01, 0x01};
1068 if (unlikely(!dchan || !len))
1071 chan = to_dma_chan(dchan);
1074 /* Allocate the link descriptor from DMA pool */
1075 new = xgene_dma_alloc_descriptor(chan);
1079 /* Prepare xor DMA descriptor */
1080 xgene_dma_prep_xor_desc(chan, new, &dst, src,
1081 src_cnt, &len, multi);
1087 async_tx_ack(&new->tx);
1089 /* Insert the link descriptor to the LD ring */
1090 list_add_tail(&new->node, &first->tx_list);
1093 new->tx.flags = flags; /* client is in control of this ack */
1094 new->tx.cookie = -EBUSY;
1095 list_splice(&first->tx_list, &new->tx_list);
1103 xgene_dma_free_desc_list(chan, &first->tx_list);
1107 static struct dma_async_tx_descriptor *xgene_dma_prep_pq(
1108 struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
1109 u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
1111 struct xgene_dma_desc_sw *first = NULL, *new;
1112 struct xgene_dma_chan *chan;
1114 dma_addr_t _src[XGENE_DMA_MAX_XOR_SRC];
1115 static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {0x01, 0x01, 0x01, 0x01, 0x01};
1117 if (unlikely(!dchan || !len))
1120 chan = to_dma_chan(dchan);
1123 * Save the source addresses in a local variable; we may have to
1124 * prepare two descriptors to generate P and Q if both are enabled
1125 * in the flags by the client
1127 memcpy(_src, src, sizeof(*src) * src_cnt);
1129 if (flags & DMA_PREP_PQ_DISABLE_P)
1132 if (flags & DMA_PREP_PQ_DISABLE_Q)
1136 /* Allocate the link descriptor from DMA pool */
1137 new = xgene_dma_alloc_descriptor(chan);
1145 async_tx_ack(&new->tx);
1147 /* Insert the link descriptor to the LD ring */
1148 list_add_tail(&new->node, &first->tx_list);
1151 * Prepare DMA descriptor to generate P,
1152 * if DMA_PREP_PQ_DISABLE_P flag is not set
1155 xgene_dma_prep_xor_desc(chan, new, &dst[0], src,
1156 src_cnt, &len, multi);
1161 * Prepare DMA descriptor to generate Q,
1162 * if DMA_PREP_PQ_DISABLE_Q flag is not set
1165 xgene_dma_prep_xor_desc(chan, new, &dst[1], _src,
1166 src_cnt, &_len, scf);
1168 } while (len || _len);
1170 new->tx.flags = flags; /* client is in control of this ack */
1171 new->tx.cookie = -EBUSY;
1172 list_splice(&first->tx_list, &new->tx_list);
1180 xgene_dma_free_desc_list(chan, &first->tx_list);
1184 static void xgene_dma_issue_pending(struct dma_chan *dchan)
1186 struct xgene_dma_chan *chan = to_dma_chan(dchan);
1188 spin_lock_bh(&chan->lock);
1189 xgene_chan_xfer_ld_pending(chan);
1190 spin_unlock_bh(&chan->lock);
1193 static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan,
1194 dma_cookie_t cookie,
1195 struct dma_tx_state *txstate)
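/*
 * Completion state is tracked purely through cookies (completed from the
 * cleanup path via dma_cookie_complete()), so the generic
 * dma_cookie_status() helper is sufficient here; no residue is reported.
 */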
1197 return dma_cookie_status(dchan, cookie, txstate);
1200 static void xgene_dma_tasklet_cb(unsigned long data)
1202 struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
1204 /* Run all cleanup for descriptors which have been completed */
1205 xgene_dma_cleanup_descriptors(chan);
1207 /* Re-enable DMA channel IRQ */
1208 enable_irq(chan->rx_irq);
1211 static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
1213 struct xgene_dma_chan *chan = (struct xgene_dma_chan *)id;
1218 * Disable DMA channel IRQ until we process completed transactions
1221 disable_irq_nosync(chan->rx_irq);
1224 * Schedule the tasklet to handle all cleanup of the current
1225 * transaction. It will start a new transaction if there is any pending work
1228 tasklet_schedule(&chan->tasklet);
1233 static irqreturn_t xgene_dma_err_isr(int irq, void *id)
1235 struct xgene_dma *pdma = (struct xgene_dma *)id;
1236 unsigned long int_mask;
1239 val = ioread32(pdma->csr_dma + XGENE_DMA_INT);
1241 /* Clear DMA interrupts */
1242 iowrite32(val, pdma->csr_dma + XGENE_DMA_INT);
1244 /* Print DMA error info */
1245 int_mask = val >> XGENE_DMA_INT_MASK_SHIFT;
1246 for_each_set_bit(i, &int_mask, ARRAY_SIZE(xgene_dma_err))
1248 "Interrupt status 0x%08X %s\n", val, xgene_dma_err[i]);
1253 static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring)
1257 iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE);
1259 for (i = 0; i < XGENE_DMA_RING_NUM_CONFIG; i++)
1260 iowrite32(ring->state[i], ring->pdma->csr_ring +
1261 XGENE_DMA_RING_STATE_WR_BASE + (i * 4));
1264 static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring)
1266 memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG);
1267 xgene_dma_wr_ring_state(ring);
1270 static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
1272 void *ring_cfg = ring->state;
1273 u64 addr = ring->desc_paddr;
1276 ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;
1278 /* Clear DMA ring state */
1279 xgene_dma_clr_ring_state(ring);
1281 /* Set DMA ring type */
1282 XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR);
1284 if (ring->owner == XGENE_DMA_RING_OWNER_DMA) {
1285 /* Set recombination buffer and timeout */
1286 XGENE_DMA_RING_RECOMBBUF_SET(ring_cfg);
1287 XGENE_DMA_RING_RECOMTIMEOUTL_SET(ring_cfg);
1288 XGENE_DMA_RING_RECOMTIMEOUTH_SET(ring_cfg);
1291 /* Initialize DMA ring state */
1292 XGENE_DMA_RING_SELTHRSH_SET(ring_cfg);
1293 XGENE_DMA_RING_ACCEPTLERR_SET(ring_cfg);
1294 XGENE_DMA_RING_COHERENT_SET(ring_cfg);
1295 XGENE_DMA_RING_ADDRL_SET(ring_cfg, addr);
1296 XGENE_DMA_RING_ADDRH_SET(ring_cfg, addr);
1297 XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize);
1299 /* Write DMA ring configurations */
1300 xgene_dma_wr_ring_state(ring);
1302 /* Set DMA ring id */
1303 iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id),
1304 ring->pdma->csr_ring + XGENE_DMA_RING_ID);
1306 /* Set DMA ring buffer */
1307 iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num),
1308 ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
1310 if (ring->owner != XGENE_DMA_RING_OWNER_CPU)
1313 /* Set empty signature to DMA Rx ring descriptors */
1314 for (i = 0; i < ring->slots; i++) {
1315 struct xgene_dma_desc_hw *desc;
1317 desc = &ring->desc_hw[i];
1318 desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
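/*
 * A slot whose m0 still holds the empty signature has not been written
 * back by the engine yet; xgene_dma_cleanup_descriptors() relies on this
 * to detect newly completed descriptors and re-stamps the signature once
 * a slot has been processed.
 */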
1321 /* Enable DMA Rx ring interrupt */
1322 val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
1323 XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num);
1324 iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
1327 static void xgene_dma_clear_ring(struct xgene_dma_ring *ring)
1331 if (ring->owner == XGENE_DMA_RING_OWNER_CPU) {
1332 /* Disable DMA Rx ring interrupt */
1333 val = ioread32(ring->pdma->csr_ring +
1334 XGENE_DMA_RING_NE_INT_MODE);
1335 XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num);
1336 iowrite32(val, ring->pdma->csr_ring +
1337 XGENE_DMA_RING_NE_INT_MODE);
1340 /* Clear DMA ring state */
1341 ring_id = XGENE_DMA_RING_ID_SETUP(ring->id);
1342 iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID);
1344 iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
1345 xgene_dma_clr_ring_state(ring);
1348 static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring)
1350 ring->cmd_base = ring->pdma->csr_ring_cmd +
1351 XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num -
1352 XGENE_DMA_RING_NUM));
1354 ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET;
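/*
 * Each ring owns a 64-byte window in the ring-command CSR space, selected
 * by (ring->num - XGENE_DMA_RING_NUM) << 6; ring->cmd is the doorbell
 * register inside that window, used to post new Tx descriptors and to
 * return processed Rx slots.
 */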
1357 static int xgene_dma_get_ring_size(struct xgene_dma_chan *chan,
1358 enum xgene_dma_ring_cfgsize cfgsize)
1363 case XGENE_DMA_RING_CFG_SIZE_512B:
1366 case XGENE_DMA_RING_CFG_SIZE_2KB:
1369 case XGENE_DMA_RING_CFG_SIZE_16KB:
1372 case XGENE_DMA_RING_CFG_SIZE_64KB:
1375 case XGENE_DMA_RING_CFG_SIZE_512KB:
1379 chan_err(chan, "Unsupported cfg ring size %d\n", cfgsize);
1386 static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring)
1388 /* Clear DMA ring configurations */
1389 xgene_dma_clear_ring(ring);
1391 /* De-allocate DMA ring descriptor */
1392 if (ring->desc_vaddr) {
1393 dma_free_coherent(ring->pdma->dev, ring->size,
1394 ring->desc_vaddr, ring->desc_paddr);
1395 ring->desc_vaddr = NULL;
1399 static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan)
1401 xgene_dma_delete_ring_one(&chan->rx_ring);
1402 xgene_dma_delete_ring_one(&chan->tx_ring);
1405 static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
1406 struct xgene_dma_ring *ring,
1407 enum xgene_dma_ring_cfgsize cfgsize)
1411 /* Setup DMA ring descriptor variables */
1412 ring->pdma = chan->pdma;
1413 ring->cfgsize = cfgsize;
1414 ring->num = chan->pdma->ring_num++;
1415 ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
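/*
 * The ring id packs the owner into bits 6 and up and the buffer number
 * into the low bits; e.g. channel 0's CPU-owned Rx ring (owner 0x0F,
 * buf_num 0x18) gets id (0x0F << 6) | 0x18 = 0x3D8.
 */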
1417 ret = xgene_dma_get_ring_size(chan, cfgsize);
1422 /* Allocate memory for DMA ring descriptor */
1423 ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
1424 &ring->desc_paddr, GFP_KERNEL);
1425 if (!ring->desc_vaddr) {
1426 chan_err(chan, "Failed to allocate ring desc\n");
1430 /* Configure and enable DMA ring */
1431 xgene_dma_set_ring_cmd(ring);
1432 xgene_dma_setup_ring(ring);
1437 static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
1439 struct xgene_dma_ring *rx_ring = &chan->rx_ring;
1440 struct xgene_dma_ring *tx_ring = &chan->tx_ring;
1443 /* Create DMA Rx ring descriptor */
1444 rx_ring->owner = XGENE_DMA_RING_OWNER_CPU;
1445 rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id;
1447 ret = xgene_dma_create_ring_one(chan, rx_ring,
1448 XGENE_DMA_RING_CFG_SIZE_64KB);
1452 chan_dbg(chan, "Rx ring id 0x%X num %d desc 0x%p\n",
1453 rx_ring->id, rx_ring->num, rx_ring->desc_vaddr);
1455 /* Create DMA Tx ring descriptor */
1456 tx_ring->owner = XGENE_DMA_RING_OWNER_DMA;
1457 tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id;
1459 ret = xgene_dma_create_ring_one(chan, tx_ring,
1460 XGENE_DMA_RING_CFG_SIZE_64KB);
1462 xgene_dma_delete_ring_one(rx_ring);
1466 tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num);
1469 "Tx ring id 0x%X num %d desc 0x%p\n",
1470 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
1472 /* Set the maximum number of outstanding requests possible for this channel */
1473 chan->max_outstanding = tx_ring->slots;
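/*
 * chan->pending is checked against this limit in
 * xgene_chan_xfer_ld_pending(), so we never post more hw descriptors than
 * the Tx ring has slots.
 */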
1478 static int xgene_dma_init_rings(struct xgene_dma *pdma)
1482 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1483 ret = xgene_dma_create_chan_rings(&pdma->chan[i]);
1485 for (j = 0; j < i; j++)
1486 xgene_dma_delete_chan_rings(&pdma->chan[j]);
1494 static void xgene_dma_enable(struct xgene_dma *pdma)
1498 /* Configure and enable DMA engine */
1499 val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
1500 XGENE_DMA_CH_SETUP(val);
1501 XGENE_DMA_ENABLE(val);
1502 iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
1505 static void xgene_dma_disable(struct xgene_dma *pdma)
1509 val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
1510 XGENE_DMA_DISABLE(val);
1511 iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
1514 static void xgene_dma_mask_interrupts(struct xgene_dma *pdma)
1517 * Mask DMA ring overflow, underflow and
1518 * AXI write/read error interrupts
1520 iowrite32(XGENE_DMA_INT_ALL_MASK,
1521 pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
1522 iowrite32(XGENE_DMA_INT_ALL_MASK,
1523 pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
1524 iowrite32(XGENE_DMA_INT_ALL_MASK,
1525 pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
1526 iowrite32(XGENE_DMA_INT_ALL_MASK,
1527 pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
1528 iowrite32(XGENE_DMA_INT_ALL_MASK,
1529 pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
1531 /* Mask DMA error interrupts */
1532 iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK);
1535 static void xgene_dma_unmask_interrupts(struct xgene_dma *pdma)
1538 * Unmask DMA ring overflow, underflow and
1539 * AXI write/read error interrupts
1541 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1542 pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
1543 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1544 pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
1545 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1546 pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
1547 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1548 pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
1549 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1550 pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
1552 /* Unmask DMA error interrupts */
1553 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1554 pdma->csr_dma + XGENE_DMA_INT_MASK);
1557 static void xgene_dma_init_hw(struct xgene_dma *pdma)
1561 /* Associate DMA rings with the corresponding ring HW */
1562 iowrite32(XGENE_DMA_ASSOC_RING_MNGR1,
1563 pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC);
1565 /* Configure RAID6 polynomial control setting */
1566 if (is_pq_enabled(pdma))
1567 iowrite32(XGENE_DMA_RAID6_MULTI_CTRL(0x1D),
1568 pdma->csr_dma + XGENE_DMA_RAID6_CONT);
1570 dev_info(pdma->dev, "PQ is disabled in HW\n");
1572 xgene_dma_enable(pdma);
1573 xgene_dma_unmask_interrupts(pdma);
1575 /* Get DMA id and version info */
1576 val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR);
1578 /* DMA device info */
1580 "X-Gene DMA v%d.%02d.%02d driver registered %d channels",
1581 XGENE_DMA_REV_NO_RD(val), XGENE_DMA_BUS_ID_RD(val),
1582 XGENE_DMA_DEV_ID_RD(val), XGENE_DMA_MAX_CHANNEL);
1585 static int xgene_dma_init_ring_mngr(struct xgene_dma *pdma)
1587 if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) &&
1588 (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST)))
1591 iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN);
1592 iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST);
1594 /* Bring up memory */
1595 iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);
1597 /* Force a barrier */
1598 ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);
1600 /* reset may take up to 1ms */
1601 usleep_range(1000, 1100);
1603 if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY)
1604 != XGENE_DMA_RING_BLK_MEM_RDY_VAL) {
1606 "Failed to release ring mngr memory from shutdown\n");
1610 /* program threshold set 1 and all hysteresis */
1611 iowrite32(XGENE_DMA_RING_THRESLD0_SET1_VAL,
1612 pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1);
1613 iowrite32(XGENE_DMA_RING_THRESLD1_SET1_VAL,
1614 pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1);
1615 iowrite32(XGENE_DMA_RING_HYSTERESIS_VAL,
1616 pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS);
1618 /* Enable QPcore and assign error queue */
1619 iowrite32(XGENE_DMA_RING_ENABLE,
1620 pdma->csr_ring + XGENE_DMA_RING_CONFIG);
1625 static int xgene_dma_init_mem(struct xgene_dma *pdma)
1629 ret = xgene_dma_init_ring_mngr(pdma);
1633 /* Bring up memory */
1634 iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);
1636 /* Force a barrier */
1637 ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);
1639 /* reset may take up to 1ms */
1640 usleep_range(1000, 1100);
1642 if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY)
1643 != XGENE_DMA_BLK_MEM_RDY_VAL) {
1645 "Failed to release DMA memory from shutdown\n");
1652 static int xgene_dma_request_irqs(struct xgene_dma *pdma)
1654 struct xgene_dma_chan *chan;
1657 /* Register DMA error irq */
1658 ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr,
1659 0, "dma_error", pdma);
1662 "Failed to register error IRQ %d\n", pdma->err_irq);
1666 /* Register DMA channel rx irq */
1667 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1668 chan = &pdma->chan[i];
1669 ret = devm_request_irq(chan->dev, chan->rx_irq,
1670 xgene_dma_chan_ring_isr,
1671 0, chan->name, chan);
1673 chan_err(chan, "Failed to register Rx IRQ %d\n",
1675 devm_free_irq(pdma->dev, pdma->err_irq, pdma);
1677 for (j = 0; j < i; j++) {
1678 chan = &pdma->chan[j];
1679 devm_free_irq(chan->dev, chan->rx_irq, chan);
1689 static void xgene_dma_free_irqs(struct xgene_dma *pdma)
1691 struct xgene_dma_chan *chan;
1694 /* Free DMA device error irq */
1695 devm_free_irq(pdma->dev, pdma->err_irq, pdma);
1697 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1698 chan = &pdma->chan[i];
1699 devm_free_irq(chan->dev, chan->rx_irq, chan);
1703 static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
1704 struct dma_device *dma_dev)
1706 /* Initialize DMA device capability mask */
1707 dma_cap_zero(dma_dev->cap_mask);
1709 /* Set DMA device capability */
1710 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1711 dma_cap_set(DMA_SG, dma_dev->cap_mask);
1713 /* On the X-Gene SoC DMA engine, channel 0 supports XOR
1714 * and channel 1 supports both XOR and PQ. First, the hw provides a
1715 * mechanism to enable/disable PQ/XOR support on channel 1,
1716 * which we can check by reading the SoC efuse register.
1717 * Second, there is a hw erratum: if channel 0 and channel 1 run
1718 * simultaneously while executing XOR and PQ requests, the DMA
1719 * engine hangs. So we enable XOR on channel 0 only
1720 * if XOR and PQ support on channel 1 is disabled.
1722 if ((chan->id == XGENE_DMA_PQ_CHANNEL) &&
1723 is_pq_enabled(chan->pdma)) {
1724 dma_cap_set(DMA_PQ, dma_dev->cap_mask);
1725 dma_cap_set(DMA_XOR, dma_dev->cap_mask);
1726 } else if ((chan->id == XGENE_DMA_XOR_CHANNEL) &&
1727 !is_pq_enabled(chan->pdma)) {
1728 dma_cap_set(DMA_XOR, dma_dev->cap_mask);
1731 /* Set base and prep routines */
1732 dma_dev->dev = chan->dev;
1733 dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources;
1734 dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
1735 dma_dev->device_issue_pending = xgene_dma_issue_pending;
1736 dma_dev->device_tx_status = xgene_dma_tx_status;
1737 dma_dev->device_prep_dma_memcpy = xgene_dma_prep_memcpy;
1738 dma_dev->device_prep_dma_sg = xgene_dma_prep_sg;
1740 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1741 dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
1742 dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
1743 dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES;
1746 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1747 dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
1748 dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
1749 dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES;
1753 static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
1755 struct xgene_dma_chan *chan = &pdma->chan[id];
1756 struct dma_device *dma_dev = &pdma->dma_dev[id];
1759 chan->dma_chan.device = dma_dev;
1761 spin_lock_init(&chan->lock);
1762 INIT_LIST_HEAD(&chan->ld_pending);
1763 INIT_LIST_HEAD(&chan->ld_running);
1764 INIT_LIST_HEAD(&chan->ld_completed);
1765 tasklet_init(&chan->tasklet, xgene_dma_tasklet_cb,
1766 (unsigned long)chan);
1769 chan->desc_pool = NULL;
1770 dma_cookie_init(&chan->dma_chan);
1772 /* Setup dma device capabilities and prep routines */
1773 xgene_dma_set_caps(chan, dma_dev);
1775 /* Initialize DMA device list head */
1776 INIT_LIST_HEAD(&dma_dev->channels);
1777 list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels);
1779 /* Register with Linux async DMA framework */
1780 ret = dma_async_device_register(dma_dev);
1782 chan_err(chan, "Failed to register async device %d", ret);
1783 tasklet_kill(&chan->tasklet);
1788 /* DMA capability info */
1790 "%s: CAPABILITY ( %s%s%s%s)\n", dma_chan_name(&chan->dma_chan),
1791 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "MEMCPY " : "",
1792 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
1793 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
1794 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");
1799 static int xgene_dma_init_async(struct xgene_dma *pdma)
1803 for (i = 0; i < XGENE_DMA_MAX_CHANNEL ; i++) {
1804 ret = xgene_dma_async_register(pdma, i);
1806 for (j = 0; j < i; j++) {
1807 dma_async_device_unregister(&pdma->dma_dev[j]);
1808 tasklet_kill(&pdma->chan[j].tasklet);
1818 static void xgene_dma_async_unregister(struct xgene_dma *pdma)
1822 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
1823 dma_async_device_unregister(&pdma->dma_dev[i]);
1826 static void xgene_dma_init_channels(struct xgene_dma *pdma)
1828 struct xgene_dma_chan *chan;
1831 pdma->ring_num = XGENE_DMA_RING_NUM;
1833 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1834 chan = &pdma->chan[i];
1835 chan->dev = pdma->dev;
1838 snprintf(chan->name, sizeof(chan->name), "dmachan%d", chan->id);
1842 static int xgene_dma_get_resources(struct platform_device *pdev,
1843 struct xgene_dma *pdma)
1845 struct resource *res;
1848 /* Get DMA csr region */
1849 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1851 dev_err(&pdev->dev, "Failed to get csr region\n");
1855 pdma->csr_dma = devm_ioremap(&pdev->dev, res->start,
1856 resource_size(res));
1857 if (!pdma->csr_dma) {
1858 dev_err(&pdev->dev, "Failed to ioremap csr region");
1862 /* Get DMA ring csr region */
1863 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1865 dev_err(&pdev->dev, "Failed to get ring csr region\n");
1869 pdma->csr_ring = devm_ioremap(&pdev->dev, res->start,
1870 resource_size(res));
1871 if (!pdma->csr_ring) {
1872 dev_err(&pdev->dev, "Failed to ioremap ring csr region");
1876 /* Get DMA ring cmd csr region */
1877 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1879 dev_err(&pdev->dev, "Failed to get ring cmd csr region\n");
1883 pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start,
1884 resource_size(res));
1885 if (!pdma->csr_ring_cmd) {
1886 dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region");
1890 pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;
1892 /* Get efuse csr region */
1893 res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1895 dev_err(&pdev->dev, "Failed to get efuse csr region\n");
1899 pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start,
1900 resource_size(res));
1901 if (!pdma->csr_efuse) {
1902 dev_err(&pdev->dev, "Failed to ioremap efuse csr region");
1906 /* Get DMA error interrupt */
1907 irq = platform_get_irq(pdev, 0);
1909 dev_err(&pdev->dev, "Failed to get Error IRQ\n");
1913 pdma->err_irq = irq;
1915 /* Get DMA Rx ring descriptor interrupts for all DMA channels */
1916 for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) {
1917 irq = platform_get_irq(pdev, i);
1919 dev_err(&pdev->dev, "Failed to get Rx IRQ\n");
1923 pdma->chan[i - 1].rx_irq = irq;
1929 static int xgene_dma_probe(struct platform_device *pdev)
1931 struct xgene_dma *pdma;
1934 pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL);
1938 pdma->dev = &pdev->dev;
1939 platform_set_drvdata(pdev, pdma);
1941 ret = xgene_dma_get_resources(pdev, pdma);
1945 pdma->clk = devm_clk_get(&pdev->dev, NULL);
1946 if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) {
1947 dev_err(&pdev->dev, "Failed to get clk\n");
1948 return PTR_ERR(pdma->clk);
1951 /* Enable clk before accessing registers */
1952 if (!IS_ERR(pdma->clk)) {
1953 ret = clk_prepare_enable(pdma->clk);
1955 dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
1960 /* Bring DMA RAM out of shutdown */
1961 ret = xgene_dma_init_mem(pdma);
1963 goto err_clk_enable;
1965 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42));
1967 dev_err(&pdev->dev, "No usable DMA configuration\n");
1971 /* Initialize DMA channels software state */
1972 xgene_dma_init_channels(pdma);
1974 /* Configure DMA rings */
1975 ret = xgene_dma_init_rings(pdma);
1977 goto err_clk_enable;
1979 ret = xgene_dma_request_irqs(pdma);
1981 goto err_request_irq;
1983 /* Configure and enable DMA engine */
1984 xgene_dma_init_hw(pdma);
1986 /* Register DMA device with linux async framework */
1987 ret = xgene_dma_init_async(pdma);
1989 goto err_async_init;
1994 xgene_dma_free_irqs(pdma);
1997 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
1998 xgene_dma_delete_chan_rings(&pdma->chan[i]);
2002 if (!IS_ERR(pdma->clk))
2003 clk_disable_unprepare(pdma->clk);
2008 static int xgene_dma_remove(struct platform_device *pdev)
2010 struct xgene_dma *pdma = platform_get_drvdata(pdev);
2011 struct xgene_dma_chan *chan;
2014 xgene_dma_async_unregister(pdma);
2016 /* Mask interrupts and disable DMA engine */
2017 xgene_dma_mask_interrupts(pdma);
2018 xgene_dma_disable(pdma);
2019 xgene_dma_free_irqs(pdma);
2021 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
2022 chan = &pdma->chan[i];
2023 tasklet_kill(&chan->tasklet);
2024 xgene_dma_delete_chan_rings(chan);
2027 if (!IS_ERR(pdma->clk))
2028 clk_disable_unprepare(pdma->clk);
2034 static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = {
2038 MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr);
2041 static const struct of_device_id xgene_dma_of_match_ptr[] = {
2042 {.compatible = "apm,xgene-storm-dma",},
2045 MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr);
2047 static struct platform_driver xgene_dma_driver = {
2048 .probe = xgene_dma_probe,
2049 .remove = xgene_dma_remove,
2051 .name = "X-Gene-DMA",
2052 .of_match_table = xgene_dma_of_match_ptr,
2053 .acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr),
2057 module_platform_driver(xgene_dma_driver);
2059 MODULE_DESCRIPTION("APM X-Gene SoC DMA driver");
2060 MODULE_AUTHOR("Rameshwar Prasad Sahu <rsahu@apm.com>");
2061 MODULE_AUTHOR("Loc Ho <lho@apm.com>");
2062 MODULE_LICENSE("GPL");
2063 MODULE_VERSION("1.0");