2 * Copyright (C) Ericsson AB 2007-2008
3 * Copyright (C) ST-Ericsson SA 2008-2010
4 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
5 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
6 * License terms: GNU General Public License (GPL) version 2
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/dmaengine.h>
12 #include <linux/platform_device.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/err.h>
17 #include <plat/ste_dma40.h>
19 #include "ste_dma40_ll.h"
21 #define D40_NAME "dma40"
23 #define D40_PHY_CHAN -1
25 /* For masking out/in 2-bit channel positions */
26 #define D40_CHAN_POS(chan) (2 * (chan / 2))
27 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
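/*
 * The 2-bit field position depends only on the channel pair: even channels
 * live in the "E" registers (e.g. D40_DREG_ACTIVE) and odd channels in the
 * "O" registers (e.g. D40_DREG_ACTIVO), so channel 4 and channel 5 both get
 * D40_CHAN_POS() == 4 and D40_CHAN_POS_MASK() == 0x30, but in different
 * registers.
 */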
29 /* Maximum iterations taken before giving up on suspending a channel */
30 #define D40_SUSPEND_MAX_IT 500
32 /* Hardware requirement on LCLA alignment */
33 #define LCLA_ALIGNMENT 0x40000
35 /* Max number of links per event group */
36 #define D40_LCLA_LINK_PER_EVENT_GRP 128
37 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
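/*
 * Each physical channel owns D40_LCLA_LINK_PER_EVENT_GRP LCLA link slots
 * (see d40_lcla_alloc_one()); they are handed out as src/dst pairs and
 * slot 0 is reserved as the end-of-links marker.
 */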
39 /* Attempts before giving up on trying to get pages that are aligned */
40 #define MAX_LCLA_ALLOC_ATTEMPTS 256
42 /* Bit markings for allocation map */
43 #define D40_ALLOC_FREE (1 << 31)
44 #define D40_ALLOC_PHY (1 << 30)
45 #define D40_ALLOC_LOG_FREE 0
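/*
 * allocated_src/allocated_dst in struct d40_phy_res take these values:
 * D40_ALLOC_FREE means the half channel is unused, D40_ALLOC_PHY means it is
 * taken as a whole by a physical channel, and anything else is
 * D40_ALLOC_LOG_FREE plus one bit per allocated logical event line,
 * e.g. (1 << 3) when only event line 3 is in use.
 */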
47 /* Hardware designer of the block */
48 #define D40_HW_DESIGNER 0x8
51 * enum d40_command - The different commands and/or statuses.
53 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
54 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
55 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
56 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
61 D40_DMA_SUSPEND_REQ = 2,
66 * struct d40_lli_pool - Structure for keeping LLIs in memory
68 * @base: Pointer to memory area when the pre_alloc_lli's are not large
69 * enough, i.e. bigger than the most common case of 1 dst and 1 src. NULL if
70 * pre_alloc_lli is used.
71 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
72 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
73 * one buffer to one buffer.
78 /* Space for dst and src, plus an extra for padding */
79 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
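	/*
	 * 3 * sizeof(struct d40_phy_lli) leaves room for one src LLI, one dst
	 * LLI and alignment slack; longer jobs fall back to a kmalloc'd area
	 * in d40_pool_lli_alloc().
	 */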
83 * struct d40_desc - A descriptor is one DMA job.
85 * @lli_phy: LLI settings for physical channel. Both src and dst
86 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
88 * @lli_log: Same as above but for logical channels.
89 * @lli_pool: The pool with two entries pre-allocated.
90 * @lli_len: Number of llis of current descriptor.
91 * @lli_current: Number of transferred llis.
92 * @lcla_alloc: Number of LCLA entries allocated.
93 * @txd: DMA engine struct. Used, among other things, for communication
96 * @is_in_client_list: true if the client owns this descriptor.
99 * This descriptor is used for both logical and physical transfers.
103 struct d40_phy_lli_bidir lli_phy;
105 struct d40_log_lli_bidir lli_log;
107 struct d40_lli_pool lli_pool;
112 struct dma_async_tx_descriptor txd;
113 struct list_head node;
115 bool is_in_client_list;
119 * struct d40_lcla_pool - LCLA pool settings and data.
121 * @base: The virtual address of LCLA. Aligned to a 256 KiB (2^18) boundary.
122 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
123 * This pointer is only there for clean-up on error.
124 * @pages: The number of pages needed for all physical channels.
125 * Only used later for clean-up on error.
126 * @lock: Lock to protect the content in this struct.
127 * @alloc_map: Map of which LCLA entry is owned by which job.
129 struct d40_lcla_pool {
131 void *base_unaligned;
134 struct d40_desc **alloc_map;
138 * struct d40_phy_res - struct for handling eventlines mapped to physical
141 * @lock: A lock protecting this entity.
142 * @num: The physical channel number of this entity.
143 * @allocated_src: Bit mapped to show which src event lines are mapped to
144 * this physical channel. Can also be free or physically allocated.
145 * @allocated_dst: Same as for src but for dst.
146 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
159 * struct d40_chan - Struct that describes a channel.
161 * @lock: A spinlock to protect this struct.
162 * @log_num: The logical number, if any, of this channel.
163 * @completed: Starts with 1, after first interrupt it is set to dma engine's
165 * @pending_tx: The number of pending transfers. Used between interrupt handler
167 * @busy: Set to true when transfer is ongoing on this channel.
168 * @phy_chan: Pointer to physical channel which this instance runs on. If this
169 * pointer is NULL, then the channel is not allocated.
170 * @chan: DMA engine handle.
171 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
172 * transfer and call client callback.
173 * @client: Client owned descriptor list.
174 * @active: Active descriptor.
175 * @queue: Queued jobs.
176 * @dma_cfg: The client configuration of this dma channel.
177 * @configured: whether the dma_cfg configuration is valid
178 * @base: Pointer to the device instance struct.
179 * @src_def_cfg: Default cfg register setting for src.
180 * @dst_def_cfg: Default cfg register setting for dst.
181 * @log_def: Default logical channel settings.
182 * @lcla: Space for one dst src pair for logical channel transfers.
183 * @lcpa: Pointer to dst and src lcpa settings.
185 * This struct can either "be" a logical or a physical channel.
190 /* ID of the most recent completed transfer */
194 struct d40_phy_res *phy_chan;
195 struct dma_chan chan;
196 struct tasklet_struct tasklet;
197 struct list_head client;
198 struct list_head active;
199 struct list_head queue;
200 struct stedma40_chan_cfg dma_cfg;
202 struct d40_base *base;
203 /* Default register configurations */
206 struct d40_def_lcsp log_def;
207 struct d40_log_lli_full *lcpa;
208 /* Runtime reconfiguration */
209 dma_addr_t runtime_addr;
210 enum dma_data_direction runtime_direction;
214 * struct d40_base - The big global struct, one for each probe'd instance.
216 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
217 * @execmd_lock: Lock for execute command usage since several channels share
218 * the same physical register.
219 * @dev: The device structure.
220 * @virtbase: The virtual base address of the DMA's register.
221 * @rev: silicon revision detected.
222 * @clk: Pointer to the DMA clock structure.
223 * @phy_start: Physical memory start of the DMA registers.
224 * @phy_size: Size of the DMA register map.
225 * @irq: The IRQ number.
226 * @num_phy_chans: The number of physical channels. Read from HW. This
227 * is the number of available channels for this driver, not counting "Secure
228 * mode" allocated physical channels.
229 * @num_log_chans: The number of logical channels. Calculated from
231 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
232 * @dma_slave: dma_device channels that can only do slave transfers.
233 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
234 * @log_chans: Room for all possible logical channels in system.
235 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
236 * to log_chans entries.
237 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
238 * to phy_chans entries.
239 * @plat_data: Pointer to provided platform_data which is the driver
241 * @phy_res: Vector containing all physical channels.
242 * @lcla_pool: lcla pool settings and data.
243 * @lcpa_base: The virtual mapped address of LCPA.
244 * @phy_lcpa: The physical address of the LCPA.
245 * @lcpa_size: The size of the LCPA area.
246 * @desc_slab: cache for descriptors.
249 spinlock_t interrupt_lock;
250 spinlock_t execmd_lock;
252 void __iomem *virtbase;
255 phys_addr_t phy_start;
256 resource_size_t phy_size;
260 struct dma_device dma_both;
261 struct dma_device dma_slave;
262 struct dma_device dma_memcpy;
263 struct d40_chan *phy_chans;
264 struct d40_chan *log_chans;
265 struct d40_chan **lookup_log_chans;
266 struct d40_chan **lookup_phy_chans;
267 struct stedma40_platform_data *plat_data;
268 /* Physical half channels */
269 struct d40_phy_res *phy_res;
270 struct d40_lcla_pool lcla_pool;
273 resource_size_t lcpa_size;
274 struct kmem_cache *desc_slab;
278 * struct d40_interrupt_lookup - lookup table for interrupt handler
280 * @src: Interrupt mask register.
281 * @clr: Interrupt clear register.
282 * @is_error: true if this is an error interrupt.
283 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
284 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
286 struct d40_interrupt_lookup {
294 * struct d40_reg_val - simple lookup struct
296 * @reg: The register.
297 * @val: The value that belongs to the register in reg.
304 static struct device *chan2dev(struct d40_chan *d40c)
306 return &d40c->chan.dev->device;
309 static bool chan_is_physical(struct d40_chan *chan)
311 return chan->log_num == D40_PHY_CHAN;
314 static bool chan_is_logical(struct d40_chan *chan)
316 return !chan_is_physical(chan);
319 static void __iomem *chan_base(struct d40_chan *chan)
321 return chan->base->virtbase + D40_DREG_PCBASE +
322 chan->phy_chan->num * D40_DREG_PCDELTA;
325 #define d40_err(dev, format, arg...) \
326 dev_err(dev, "[%s] " format, __func__, ## arg)
328 #define chan_err(d40c, format, arg...) \
329 d40_err(chan2dev(d40c), format, ## arg)
331 static int d40_pool_lli_alloc(struct d40_desc *d40d,
332 int lli_len, bool is_log)
338 align = sizeof(struct d40_log_lli);
340 align = sizeof(struct d40_phy_lli);
343 base = d40d->lli_pool.pre_alloc_lli;
344 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
345 d40d->lli_pool.base = NULL;
347 d40d->lli_pool.size = lli_len * 2 * align;
349 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
350 d40d->lli_pool.base = base;
352 if (d40d->lli_pool.base == NULL)
357 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
359 d40d->lli_log.dst = d40d->lli_log.src + lli_len;
361 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
363 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
369 static void d40_pool_lli_free(struct d40_desc *d40d)
371 kfree(d40d->lli_pool.base);
372 d40d->lli_pool.base = NULL;
373 d40d->lli_pool.size = 0;
374 d40d->lli_log.src = NULL;
375 d40d->lli_log.dst = NULL;
376 d40d->lli_phy.src = NULL;
377 d40d->lli_phy.dst = NULL;
380 static int d40_lcla_alloc_one(struct d40_chan *d40c,
381 struct d40_desc *d40d)
388 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
390 p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
393 * Allocate both src and dst at the same time, therefore the loop
394 * starts at 1, since index 0 can't be used: zero is the link end marker.
396 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
397 if (!d40c->base->lcla_pool.alloc_map[p + i]) {
398 d40c->base->lcla_pool.alloc_map[p + i] = d40d;
405 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
410 static int d40_lcla_free_all(struct d40_chan *d40c,
411 struct d40_desc *d40d)
417 if (chan_is_physical(d40c))
420 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
422 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
423 if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
424 D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
425 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
426 D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
428 if (d40d->lcla_alloc == 0) {
435 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
441 static void d40_desc_remove(struct d40_desc *d40d)
443 list_del(&d40d->node);
446 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
448 struct d40_desc *desc = NULL;
450 if (!list_empty(&d40c->client)) {
454 list_for_each_entry_safe(d, _d, &d40c->client, node)
455 if (async_tx_test_ack(&d->txd)) {
456 d40_pool_lli_free(d);
459 memset(desc, 0, sizeof(*desc));
465 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
468 INIT_LIST_HEAD(&desc->node);
473 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
476 d40_lcla_free_all(d40c, d40d);
477 kmem_cache_free(d40c->base->desc_slab, d40d);
480 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
482 list_add_tail(&desc->node, &d40c->active);
485 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
487 int curr_lcla = -EINVAL, next_lcla;
489 if (chan_is_physical(d40c)) {
490 d40_phy_lli_write(d40c->base->virtbase,
494 d40d->lli_current = d40d->lli_len;
497 if ((d40d->lli_len - d40d->lli_current) > 1)
498 curr_lcla = d40_lcla_alloc_one(d40c, d40d);
500 d40_log_lli_lcpa_write(d40c->lcpa,
501 &d40d->lli_log.dst[d40d->lli_current],
502 &d40d->lli_log.src[d40d->lli_current],
506 for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
507 struct d40_log_lli *lcla;
509 if (d40d->lli_current + 1 < d40d->lli_len)
510 next_lcla = d40_lcla_alloc_one(d40c, d40d);
514 lcla = d40c->base->lcla_pool.base +
515 d40c->phy_chan->num * 1024 +
518 d40_log_lli_lcla_write(lcla,
519 &d40d->lli_log.dst[d40d->lli_current],
520 &d40d->lli_log.src[d40d->lli_current],
523 (void) dma_map_single(d40c->base->dev, lcla,
524 2 * sizeof(struct d40_log_lli),
527 curr_lcla = next_lcla;
529 if (curr_lcla == -EINVAL) {
538 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
542 if (list_empty(&d40c->active))
545 d = list_first_entry(&d40c->active,
551 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
553 list_add_tail(&desc->node, &d40c->queue);
556 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
560 if (list_empty(&d40c->queue))
563 d = list_first_entry(&d40c->queue,
569 static int d40_psize_2_burst_size(bool is_log, int psize)
572 if (psize == STEDMA40_PSIZE_LOG_1)
575 if (psize == STEDMA40_PSIZE_PHY_1)
583 * The dma only supports transmitting packets up to
584 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
585 * dma elements required to send the entire sg list.
587 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
590 u32 max_w = max(data_width1, data_width2);
591 u32 min_w = min(data_width1, data_width2);
592 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
594 if (seg_max > STEDMA40_MAX_SEG_SIZE << min_w)
595 seg_max -= (1 << max_w);
597 if (!IS_ALIGNED(size, 1 << max_w))
603 dmalen = size / seg_max;
604 if (dmalen * seg_max < size)
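/*
 * Worked example (assuming STEDMA40_MAX_SEG_SIZE is 0xFFFF as defined in
 * <plat/ste_dma40.h>): a 200000 byte buffer with byte-wide src and dst
 * (min_w == max_w == 0) gives seg_max == 65535, so dmalen starts at
 * 200000 / 65535 == 3 and is then rounded up to 4 dma elements.
 */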
610 static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
611 u32 data_width1, u32 data_width2)
613 struct scatterlist *sg;
618 for_each_sg(sgl, sg, sg_len, i) {
619 ret = d40_size_2_dmalen(sg_dma_len(sg),
620 data_width1, data_width2);
628 /* Support functions for logical channels */
630 static int d40_channel_execute_command(struct d40_chan *d40c,
631 enum d40_command command)
635 void __iomem *active_reg;
640 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
642 if (d40c->phy_chan->num % 2 == 0)
643 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
645 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
647 if (command == D40_DMA_SUSPEND_REQ) {
648 status = (readl(active_reg) &
649 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
650 D40_CHAN_POS(d40c->phy_chan->num);
652 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
656 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
657 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
660 if (command == D40_DMA_SUSPEND_REQ) {
662 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
663 status = (readl(active_reg) &
664 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
665 D40_CHAN_POS(d40c->phy_chan->num);
669 * Reduce the number of bus accesses while
670 * waiting for the DMA to suspend.
674 if (status == D40_DMA_STOP ||
675 status == D40_DMA_SUSPENDED)
679 if (i == D40_SUSPEND_MAX_IT) {
681 "unable to suspend the chl %d (log: %d) status %x\n",
682 d40c->phy_chan->num, d40c->log_num,
690 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
694 static void d40_term_all(struct d40_chan *d40c)
696 struct d40_desc *d40d;
698 /* Release active descriptors */
699 while ((d40d = d40_first_active_get(d40c))) {
700 d40_desc_remove(d40d);
701 d40_desc_free(d40c, d40d);
704 /* Release queued descriptors waiting for transfer */
705 while ((d40d = d40_first_queued(d40c))) {
706 d40_desc_remove(d40d);
707 d40_desc_free(d40c, d40d);
711 d40c->pending_tx = 0;
715 static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
718 void __iomem *addr = chan_base(d40c) + reg;
722 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
723 | ~D40_EVENTLINE_MASK(event), addr);
728 * The hardware sometimes doesn't register the enable when src and dst
729 * event lines are active on the same logical channel. Retry to ensure
730 * it does. Usually only one retry is sufficient.
734 writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
735 | ~D40_EVENTLINE_MASK(event), addr);
737 if (readl(addr) & D40_EVENTLINE_MASK(event))
742 dev_dbg(chan2dev(d40c),
743 "[%s] workaround enable S%cLNK (%d tries)\n",
744 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
750 static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
754 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
756 /* Enable event line connected to device (or memcpy) */
757 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
758 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
759 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
761 __d40_config_set_event(d40c, do_enable, event,
765 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
766 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
768 __d40_config_set_event(d40c, do_enable, event,
772 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
775 static u32 d40_chan_has_events(struct d40_chan *d40c)
777 void __iomem *chanbase = chan_base(d40c);
780 val = readl(chanbase + D40_CHAN_REG_SSLNK);
781 val |= readl(chanbase + D40_CHAN_REG_SDLNK);
786 static u32 d40_get_prmo(struct d40_chan *d40c)
788 static const unsigned int phy_map[] = {
789 [STEDMA40_PCHAN_BASIC_MODE]
790 = D40_DREG_PRMO_PCHAN_BASIC,
791 [STEDMA40_PCHAN_MODULO_MODE]
792 = D40_DREG_PRMO_PCHAN_MODULO,
793 [STEDMA40_PCHAN_DOUBLE_DST_MODE]
794 = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
796 static const unsigned int log_map[] = {
797 [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
798 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
799 [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
800 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
801 [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
802 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
805 if (chan_is_physical(d40c))
806 return phy_map[d40c->dma_cfg.mode_opt];
808 return log_map[d40c->dma_cfg.mode_opt];
811 static void d40_config_write(struct d40_chan *d40c)
816 /* Odd addresses are even addresses + 4 */
817 addr_base = (d40c->phy_chan->num % 2) * 4;
818 /* Setup channel mode to logical or physical */
819 var = ((u32)(chan_is_logical(d40c)) + 1) <<
820 D40_CHAN_POS(d40c->phy_chan->num);
821 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
823 /* Setup operational mode option register */
824 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
826 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
828 if (chan_is_logical(d40c)) {
829 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
830 & D40_SREG_ELEM_LOG_LIDX_MASK;
831 void __iomem *chanbase = chan_base(d40c);
833 /* Set default config for CFG reg */
834 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
835 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
837 /* Set LIDX for lcla */
838 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
839 writel(lidx, chanbase + D40_CHAN_REG_SDELT);
843 static u32 d40_residue(struct d40_chan *d40c)
847 if (chan_is_logical(d40c))
848 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
849 >> D40_MEM_LCSP2_ECNT_POS;
851 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
852 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
853 >> D40_SREG_ELEM_PHY_ECNT_POS;
856 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
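/*
 * Example: 100 remaining elements at a 32-bit dst data width (assuming
 * STEDMA40_WORD_WIDTH evaluates to 2) gives 100 * (1 << 2) = 400 bytes
 * of residue.
 */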
859 static bool d40_tx_is_linked(struct d40_chan *d40c)
863 if (chan_is_logical(d40c))
864 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
866 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
867 & D40_SREG_LNK_PHYS_LNK_MASK;
872 static int d40_pause(struct dma_chan *chan)
874 struct d40_chan *d40c =
875 container_of(chan, struct d40_chan, chan);
882 spin_lock_irqsave(&d40c->lock, flags);
884 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
886 if (chan_is_logical(d40c)) {
887 d40_config_set_event(d40c, false);
888 /* Resume the other logical channels if any */
889 if (d40_chan_has_events(d40c))
890 res = d40_channel_execute_command(d40c,
895 spin_unlock_irqrestore(&d40c->lock, flags);
899 static int d40_resume(struct dma_chan *chan)
901 struct d40_chan *d40c =
902 container_of(chan, struct d40_chan, chan);
909 spin_lock_irqsave(&d40c->lock, flags);
911 if (d40c->base->rev == 0)
912 if (chan_is_logical(d40c)) {
913 res = d40_channel_execute_command(d40c,
914 D40_DMA_SUSPEND_REQ);
918 /* If there are bytes left to transfer or a linked tx, resume the job */
919 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
921 if (chan_is_logical(d40c))
922 d40_config_set_event(d40c, true);
924 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
928 spin_unlock_irqrestore(&d40c->lock, flags);
932 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
934 struct d40_chan *d40c = container_of(tx->chan,
937 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
940 spin_lock_irqsave(&d40c->lock, flags);
944 if (d40c->chan.cookie < 0)
945 d40c->chan.cookie = 1;
947 d40d->txd.cookie = d40c->chan.cookie;
949 d40_desc_queue(d40c, d40d);
951 spin_unlock_irqrestore(&d40c->lock, flags);
956 static int d40_start(struct d40_chan *d40c)
958 if (d40c->base->rev == 0) {
961 if (chan_is_logical(d40c)) {
962 err = d40_channel_execute_command(d40c,
963 D40_DMA_SUSPEND_REQ);
969 if (chan_is_logical(d40c))
970 d40_config_set_event(d40c, true);
972 return d40_channel_execute_command(d40c, D40_DMA_RUN);
975 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
977 struct d40_desc *d40d;
980 /* Start queued jobs, if any */
981 d40d = d40_first_queued(d40c);
986 /* Remove from queue */
987 d40_desc_remove(d40d);
989 /* Add to active queue */
990 d40_desc_submit(d40c, d40d);
992 /* Initiate DMA job */
993 d40_desc_load(d40c, d40d);
996 err = d40_start(d40c);
1005 /* called from interrupt context */
1006 static void dma_tc_handle(struct d40_chan *d40c)
1008 struct d40_desc *d40d;
1010 /* Get first active entry from list */
1011 d40d = d40_first_active_get(d40c);
1016 d40_lcla_free_all(d40c, d40d);
1018 if (d40d->lli_current < d40d->lli_len) {
1019 d40_desc_load(d40c, d40d);
1021 (void) d40_start(d40c);
1025 if (d40_queue_start(d40c) == NULL)
1029 tasklet_schedule(&d40c->tasklet);
1033 static void dma_tasklet(unsigned long data)
1035 struct d40_chan *d40c = (struct d40_chan *) data;
1036 struct d40_desc *d40d;
1037 unsigned long flags;
1038 dma_async_tx_callback callback;
1039 void *callback_param;
1041 spin_lock_irqsave(&d40c->lock, flags);
1043 /* Get first active entry from list */
1044 d40d = d40_first_active_get(d40c);
1049 d40c->completed = d40d->txd.cookie;
1052 * If terminating a channel, pending_tx is set to zero.
1053 * This prevents any finished active jobs from returning to the client.
1055 if (d40c->pending_tx == 0) {
1056 spin_unlock_irqrestore(&d40c->lock, flags);
1060 /* Callback to client */
1061 callback = d40d->txd.callback;
1062 callback_param = d40d->txd.callback_param;
1064 if (async_tx_test_ack(&d40d->txd)) {
1065 d40_pool_lli_free(d40d);
1066 d40_desc_remove(d40d);
1067 d40_desc_free(d40c, d40d);
1069 if (!d40d->is_in_client_list) {
1070 d40_desc_remove(d40d);
1071 d40_lcla_free_all(d40c, d40d);
1072 list_add_tail(&d40d->node, &d40c->client);
1073 d40d->is_in_client_list = true;
1079 if (d40c->pending_tx)
1080 tasklet_schedule(&d40c->tasklet);
1082 spin_unlock_irqrestore(&d40c->lock, flags);
1084 if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
1085 callback(callback_param);
1090 /* Rescue maneuver if receiving double interrupts */
1091 if (d40c->pending_tx > 0)
1093 spin_unlock_irqrestore(&d40c->lock, flags);
1096 static irqreturn_t d40_handle_interrupt(int irq, void *data)
1098 static const struct d40_interrupt_lookup il[] = {
1099 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
1100 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
1101 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
1102 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
1103 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
1104 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
1105 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
1106 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
1107 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
1108 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
1112 u32 regs[ARRAY_SIZE(il)];
1116 struct d40_chan *d40c;
1117 unsigned long flags;
1118 struct d40_base *base = data;
1120 spin_lock_irqsave(&base->interrupt_lock, flags);
1122 /* Read interrupt status of both logical and physical channels */
1123 for (i = 0; i < ARRAY_SIZE(il); i++)
1124 regs[i] = readl(base->virtbase + il[i].src);
1128 chan = find_next_bit((unsigned long *)regs,
1129 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
1131 /* No more set bits found? */
1132 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
1135 row = chan / BITS_PER_LONG;
1136 idx = chan & (BITS_PER_LONG - 1);
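/*
 * Example on a 32-bit build (BITS_PER_LONG == 32): bit 70 set means
 * row 2, idx 6, i.e. logical channel il[2].offset + 6 = 70.
 */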
1139 writel(1 << idx, base->virtbase + il[row].clr);
1141 if (il[row].offset == D40_PHY_CHAN)
1142 d40c = base->lookup_phy_chans[idx];
1144 d40c = base->lookup_log_chans[il[row].offset + idx];
1145 spin_lock(&d40c->lock);
1147 if (!il[row].is_error)
1148 dma_tc_handle(d40c);
1150 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1151 chan, il[row].offset, idx);
1153 spin_unlock(&d40c->lock);
1156 spin_unlock_irqrestore(&base->interrupt_lock, flags);
1161 static int d40_validate_conf(struct d40_chan *d40c,
1162 struct stedma40_chan_cfg *conf)
1165 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
1166 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
1167 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1170 chan_err(d40c, "Invalid direction.\n");
1174 if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
1175 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
1176 d40c->runtime_addr == 0) {
1178 chan_err(d40c, "Invalid TX channel address (%d)\n",
1179 conf->dst_dev_type);
1183 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
1184 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
1185 d40c->runtime_addr == 0) {
1186 chan_err(d40c, "Invalid RX channel address (%d)\n",
1187 conf->src_dev_type);
1191 if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
1192 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1193 chan_err(d40c, "Invalid dst\n");
1197 if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
1198 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1199 chan_err(d40c, "Invalid src\n");
1203 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1204 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1205 chan_err(d40c, "No event line\n");
1209 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1210 (src_event_group != dst_event_group)) {
1211 chan_err(d40c, "Invalid event group\n");
1215 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1217 * DMAC HW supports it. Will be added to this driver,
1218 * in case any dma client requires it.
1220 chan_err(d40c, "periph to periph not supported\n");
1224 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1225 (1 << conf->src_info.data_width) !=
1226 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1227 (1 << conf->dst_info.data_width)) {
1229 * The DMAC hardware only supports
1230 * src (burst x width) == dst (burst x width)
1233 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1240 static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
1241 int log_event_line, bool is_log)
1243 unsigned long flags;
1244 spin_lock_irqsave(&phy->lock, flags);
1246 /* Physical interrupts are masked per physical full channel */
1247 if (phy->allocated_src == D40_ALLOC_FREE &&
1248 phy->allocated_dst == D40_ALLOC_FREE) {
1249 phy->allocated_dst = D40_ALLOC_PHY;
1250 phy->allocated_src = D40_ALLOC_PHY;
1256 /* Logical channel */
1258 if (phy->allocated_src == D40_ALLOC_PHY)
1261 if (phy->allocated_src == D40_ALLOC_FREE)
1262 phy->allocated_src = D40_ALLOC_LOG_FREE;
1264 if (!(phy->allocated_src & (1 << log_event_line))) {
1265 phy->allocated_src |= 1 << log_event_line;
1270 if (phy->allocated_dst == D40_ALLOC_PHY)
1273 if (phy->allocated_dst == D40_ALLOC_FREE)
1274 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1276 if (!(phy->allocated_dst & (1 << log_event_line))) {
1277 phy->allocated_dst |= 1 << log_event_line;
1284 spin_unlock_irqrestore(&phy->lock, flags);
1287 spin_unlock_irqrestore(&phy->lock, flags);
1291 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1294 unsigned long flags;
1295 bool is_free = false;
1297 spin_lock_irqsave(&phy->lock, flags);
1298 if (!log_event_line) {
1299 phy->allocated_dst = D40_ALLOC_FREE;
1300 phy->allocated_src = D40_ALLOC_FREE;
1305 /* Logical channel */
1307 phy->allocated_src &= ~(1 << log_event_line);
1308 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1309 phy->allocated_src = D40_ALLOC_FREE;
1311 phy->allocated_dst &= ~(1 << log_event_line);
1312 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1313 phy->allocated_dst = D40_ALLOC_FREE;
1316 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1320 spin_unlock_irqrestore(&phy->lock, flags);
1325 static int d40_allocate_channel(struct d40_chan *d40c)
1330 struct d40_phy_res *phys;
1335 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1337 phys = d40c->base->phy_res;
1339 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1340 dev_type = d40c->dma_cfg.src_dev_type;
1341 log_num = 2 * dev_type;
1343 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1344 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1345 /* dst event lines are used for logical memcpy */
1346 dev_type = d40c->dma_cfg.dst_dev_type;
1347 log_num = 2 * dev_type + 1;
1352 event_group = D40_TYPE_TO_GROUP(dev_type);
1353 event_line = D40_TYPE_TO_EVENT(dev_type);
1356 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1357 /* Find physical half channel */
1358 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1360 if (d40_alloc_mask_set(&phys[i], is_src,
1365 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1366 int phy_num = j + event_group * 2;
1367 for (i = phy_num; i < phy_num + 2; i++) {
1368 if (d40_alloc_mask_set(&phys[i],
1377 d40c->phy_chan = &phys[i];
1378 d40c->log_num = D40_PHY_CHAN;
1384 /* Find logical channel */
1385 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1386 int phy_num = j + event_group * 2;
1388 * Spread logical channels across all available physical rather
1389 * than pack every logical channel at the first available phy
1393 for (i = phy_num; i < phy_num + 2; i++) {
1394 if (d40_alloc_mask_set(&phys[i], is_src,
1395 event_line, is_log))
1399 for (i = phy_num + 1; i >= phy_num; i--) {
1400 if (d40_alloc_mask_set(&phys[i], is_src,
1401 event_line, is_log))
1409 d40c->phy_chan = &phys[i];
1410 d40c->log_num = log_num;
1414 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1416 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1422 static int d40_config_memcpy(struct d40_chan *d40c)
1424 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1426 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1427 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1428 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1429 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1430 memcpy[d40c->chan.chan_id];
1432 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1433 dma_has_cap(DMA_SLAVE, cap)) {
1434 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1436 chan_err(d40c, "No memcpy\n");
1444 static int d40_free_dma(struct d40_chan *d40c)
1449 struct d40_phy_res *phy = d40c->phy_chan;
1452 struct d40_desc *_d;
1455 /* Terminate all queued and active transfers */
1458 /* Release client owned descriptors */
1459 if (!list_empty(&d40c->client))
1460 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1461 d40_pool_lli_free(d);
1463 d40_desc_free(d40c, d);
1467 chan_err(d40c, "phy == null\n");
1471 if (phy->allocated_src == D40_ALLOC_FREE &&
1472 phy->allocated_dst == D40_ALLOC_FREE) {
1473 chan_err(d40c, "channel already free\n");
1477 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1478 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1479 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1481 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1482 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1485 chan_err(d40c, "Unknown direction\n");
1489 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1491 chan_err(d40c, "suspend failed\n");
1495 if (chan_is_logical(d40c)) {
1496 /* Release logical channel, deactivate the event line */
1498 d40_config_set_event(d40c, false);
1499 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1502 * Check if there are more logical allocations
1503 * on this phy channel.
1505 if (!d40_alloc_mask_free(phy, is_src, event)) {
1506 /* Resume the other logical channels if any */
1507 if (d40_chan_has_events(d40c)) {
1508 res = d40_channel_execute_command(d40c,
1512 "Executing RUN command\n");
1519 (void) d40_alloc_mask_free(phy, is_src, 0);
1522 /* Release physical channel */
1523 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1525 chan_err(d40c, "Failed to stop channel\n");
1528 d40c->phy_chan = NULL;
1529 d40c->configured = false;
1530 d40c->base->lookup_phy_chans[phy->num] = NULL;
1535 static bool d40_is_paused(struct d40_chan *d40c)
1537 void __iomem *chanbase = chan_base(d40c);
1538 bool is_paused = false;
1539 unsigned long flags;
1540 void __iomem *active_reg;
1544 spin_lock_irqsave(&d40c->lock, flags);
1546 if (chan_is_physical(d40c)) {
1547 if (d40c->phy_chan->num % 2 == 0)
1548 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1550 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1552 status = (readl(active_reg) &
1553 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1554 D40_CHAN_POS(d40c->phy_chan->num);
1555 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1561 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1562 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1563 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1564 status = readl(chanbase + D40_CHAN_REG_SDLNK);
1565 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1566 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1567 status = readl(chanbase + D40_CHAN_REG_SSLNK);
1569 chan_err(d40c, "Unknown direction\n");
1573 status = (status & D40_EVENTLINE_MASK(event)) >>
1574 D40_EVENTLINE_POS(event);
1576 if (status != D40_DMA_RUN)
1579 spin_unlock_irqrestore(&d40c->lock, flags);
1585 static u32 stedma40_residue(struct dma_chan *chan)
1587 struct d40_chan *d40c =
1588 container_of(chan, struct d40_chan, chan);
1590 unsigned long flags;
1592 spin_lock_irqsave(&d40c->lock, flags);
1593 bytes_left = d40_residue(d40c);
1594 spin_unlock_irqrestore(&d40c->lock, flags);
1599 struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1600 struct scatterlist *sgl_dst,
1601 struct scatterlist *sgl_src,
1602 unsigned int sgl_len,
1603 unsigned long dma_flags)
1606 struct d40_desc *d40d;
1607 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1609 unsigned long flags;
1611 if (d40c->phy_chan == NULL) {
1612 chan_err(d40c, "Unallocated channel.\n");
1613 return ERR_PTR(-EINVAL);
1616 spin_lock_irqsave(&d40c->lock, flags);
1617 d40d = d40_desc_get(d40c);
1622 d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
1623 d40c->dma_cfg.src_info.data_width,
1624 d40c->dma_cfg.dst_info.data_width);
1625 if (d40d->lli_len < 0) {
1626 chan_err(d40c, "Unaligned size\n");
1630 d40d->lli_current = 0;
1631 d40d->txd.flags = dma_flags;
1633 if (chan_is_logical(d40c)) {
1635 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
1636 chan_err(d40c, "Out of memory\n");
1640 (void) d40_log_sg_to_lli(sgl_src,
1643 d40c->log_def.lcsp1,
1644 d40c->dma_cfg.src_info.data_width,
1645 d40c->dma_cfg.dst_info.data_width);
1647 (void) d40_log_sg_to_lli(sgl_dst,
1650 d40c->log_def.lcsp3,
1651 d40c->dma_cfg.dst_info.data_width,
1652 d40c->dma_cfg.src_info.data_width);
1654 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
1655 chan_err(d40c, "Out of memory\n");
1659 res = d40_phy_sg_to_lli(sgl_src,
1663 virt_to_phys(d40d->lli_phy.src),
1665 d40c->dma_cfg.src_info.data_width,
1666 d40c->dma_cfg.dst_info.data_width,
1667 d40c->dma_cfg.src_info.psize);
1672 res = d40_phy_sg_to_lli(sgl_dst,
1676 virt_to_phys(d40d->lli_phy.dst),
1678 d40c->dma_cfg.dst_info.data_width,
1679 d40c->dma_cfg.src_info.data_width,
1680 d40c->dma_cfg.dst_info.psize);
1685 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1686 d40d->lli_pool.size, DMA_TO_DEVICE);
1689 dma_async_tx_descriptor_init(&d40d->txd, chan);
1691 d40d->txd.tx_submit = d40_tx_submit;
1693 spin_unlock_irqrestore(&d40c->lock, flags);
1698 d40_desc_free(d40c, d40d);
1699 spin_unlock_irqrestore(&d40c->lock, flags);
1702 EXPORT_SYMBOL(stedma40_memcpy_sg);
1704 bool stedma40_filter(struct dma_chan *chan, void *data)
1706 struct stedma40_chan_cfg *info = data;
1707 struct d40_chan *d40c =
1708 container_of(chan, struct d40_chan, chan);
1712 err = d40_validate_conf(d40c, info);
1714 d40c->dma_cfg = *info;
1716 err = d40_config_memcpy(d40c);
1719 d40c->configured = true;
1723 EXPORT_SYMBOL(stedma40_filter);
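/*
 * Illustrative client usage of the filter (a sketch only; the cfg field
 * values below are assumptions about a hypothetical client, not part of
 * this driver):
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.dir = STEDMA40_PERIPH_TO_MEM,
 *		.src_dev_type = <the client's event line>,
 *		.dst_dev_type = STEDMA40_DEV_DST_MEMORY,
 *		.mode = STEDMA40_MODE_LOGICAL,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */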
1725 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
1727 bool realtime = d40c->dma_cfg.realtime;
1728 bool highprio = d40c->dma_cfg.high_priority;
1729 u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
1730 u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
1731 u32 event = D40_TYPE_TO_EVENT(dev_type);
1732 u32 group = D40_TYPE_TO_GROUP(dev_type);
1733 u32 bit = 1 << event;
1735 /* Destination event lines are stored in the upper halfword */
1739 writel(bit, d40c->base->virtbase + prioreg + group * 4);
1740 writel(bit, d40c->base->virtbase + rtreg + group * 4);
1743 static void d40_set_prio_realtime(struct d40_chan *d40c)
1745 if (d40c->base->rev < 3)
1748 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
1749 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1750 __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
1752 if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
1753 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1754 __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
1757 /* DMA ENGINE functions */
1758 static int d40_alloc_chan_resources(struct dma_chan *chan)
1761 unsigned long flags;
1762 struct d40_chan *d40c =
1763 container_of(chan, struct d40_chan, chan);
1765 spin_lock_irqsave(&d40c->lock, flags);
1767 d40c->completed = chan->cookie = 1;
1769 /* If no dma configuration is set use default configuration (memcpy) */
1770 if (!d40c->configured) {
1771 err = d40_config_memcpy(d40c);
1773 chan_err(d40c, "Failed to configure memcpy channel\n");
1777 is_free_phy = (d40c->phy_chan == NULL);
1779 err = d40_allocate_channel(d40c);
1781 chan_err(d40c, "Failed to allocate channel\n");
1785 /* Fill in basic CFG register values */
1786 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1787 &d40c->dst_def_cfg, chan_is_logical(d40c));
1789 d40_set_prio_realtime(d40c);
1791 if (chan_is_logical(d40c)) {
1792 d40_log_cfg(&d40c->dma_cfg,
1793 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1795 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1796 d40c->lcpa = d40c->base->lcpa_base +
1797 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1799 d40c->lcpa = d40c->base->lcpa_base +
1800 d40c->dma_cfg.dst_dev_type *
1801 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1805 * Only write channel configuration to the DMA if the physical
1806 * resource is free. In case of multiple logical channels
1807 * on the same physical resource, only the first write is necessary.
1810 d40_config_write(d40c);
1812 spin_unlock_irqrestore(&d40c->lock, flags);
1816 static void d40_free_chan_resources(struct dma_chan *chan)
1818 struct d40_chan *d40c =
1819 container_of(chan, struct d40_chan, chan);
1821 unsigned long flags;
1823 if (d40c->phy_chan == NULL) {
1824 chan_err(d40c, "Cannot free unallocated channel\n");
1829 spin_lock_irqsave(&d40c->lock, flags);
1831 err = d40_free_dma(d40c);
1834 chan_err(d40c, "Failed to free channel\n");
1835 spin_unlock_irqrestore(&d40c->lock, flags);
1838 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1842 unsigned long dma_flags)
1844 struct d40_desc *d40d;
1845 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1847 unsigned long flags;
1849 if (d40c->phy_chan == NULL) {
1850 chan_err(d40c, "Channel is not allocated.\n");
1851 return ERR_PTR(-EINVAL);
1854 spin_lock_irqsave(&d40c->lock, flags);
1855 d40d = d40_desc_get(d40c);
1858 chan_err(d40c, "Descriptor is NULL\n");
1862 d40d->txd.flags = dma_flags;
1863 d40d->lli_len = d40_size_2_dmalen(size,
1864 d40c->dma_cfg.src_info.data_width,
1865 d40c->dma_cfg.dst_info.data_width);
1866 if (d40d->lli_len < 0) {
1867 chan_err(d40c, "Unaligned size\n");
1872 dma_async_tx_descriptor_init(&d40d->txd, chan);
1874 d40d->txd.tx_submit = d40_tx_submit;
1876 if (chan_is_logical(d40c)) {
1878 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
1879 chan_err(d40c, "Out of memory\n");
1882 d40d->lli_current = 0;
1884 if (d40_log_buf_to_lli(d40d->lli_log.src,
1887 d40c->log_def.lcsp1,
1888 d40c->dma_cfg.src_info.data_width,
1889 d40c->dma_cfg.dst_info.data_width,
1893 if (d40_log_buf_to_lli(d40d->lli_log.dst,
1896 d40c->log_def.lcsp3,
1897 d40c->dma_cfg.dst_info.data_width,
1898 d40c->dma_cfg.src_info.data_width,
1904 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
1905 chan_err(d40c, "Out of memory\n");
1909 if (d40_phy_buf_to_lli(d40d->lli_phy.src,
1912 d40c->dma_cfg.src_info.psize,
1916 d40c->dma_cfg.src_info.data_width,
1917 d40c->dma_cfg.dst_info.data_width,
1921 if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
1924 d40c->dma_cfg.dst_info.psize,
1928 d40c->dma_cfg.dst_info.data_width,
1929 d40c->dma_cfg.src_info.data_width,
1933 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1934 d40d->lli_pool.size, DMA_TO_DEVICE);
1937 spin_unlock_irqrestore(&d40c->lock, flags);
1942 d40_desc_free(d40c, d40d);
1943 spin_unlock_irqrestore(&d40c->lock, flags);
1947 static struct dma_async_tx_descriptor *
1948 d40_prep_sg(struct dma_chan *chan,
1949 struct scatterlist *dst_sg, unsigned int dst_nents,
1950 struct scatterlist *src_sg, unsigned int src_nents,
1951 unsigned long dma_flags)
1953 if (dst_nents != src_nents)
1956 return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
1959 static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1960 struct d40_chan *d40c,
1961 struct scatterlist *sgl,
1962 unsigned int sg_len,
1963 enum dma_data_direction direction,
1964 unsigned long dma_flags)
1966 dma_addr_t dev_addr = 0;
1969 d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
1970 d40c->dma_cfg.src_info.data_width,
1971 d40c->dma_cfg.dst_info.data_width);
1972 if (d40d->lli_len < 0) {
1973 chan_err(d40c, "Unaligned size\n");
1977 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
1978 chan_err(d40c, "Out of memory\n");
1982 d40d->lli_current = 0;
1984 if (direction == DMA_FROM_DEVICE)
1985 if (d40c->runtime_addr)
1986 dev_addr = d40c->runtime_addr;
1988 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1989 else if (direction == DMA_TO_DEVICE)
1990 if (d40c->runtime_addr)
1991 dev_addr = d40c->runtime_addr;
1993 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1998 total_size = d40_log_sg_to_dev(sgl, sg_len,
2001 d40c->dma_cfg.src_info.data_width,
2002 d40c->dma_cfg.dst_info.data_width,
2012 static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
2013 struct d40_chan *d40c,
2014 struct scatterlist *sgl,
2015 unsigned int sgl_len,
2016 enum dma_data_direction direction,
2017 unsigned long dma_flags)
2019 dma_addr_t src_dev_addr;
2020 dma_addr_t dst_dev_addr;
2023 d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
2024 d40c->dma_cfg.src_info.data_width,
2025 d40c->dma_cfg.dst_info.data_width);
2026 if (d40d->lli_len < 0) {
2027 chan_err(d40c, "Unaligned size\n");
2031 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
2032 chan_err(d40c, "Out of memory\n");
2036 d40d->lli_current = 0;
2038 if (direction == DMA_FROM_DEVICE) {
2040 if (d40c->runtime_addr)
2041 src_dev_addr = d40c->runtime_addr;
2043 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
2044 } else if (direction == DMA_TO_DEVICE) {
2045 if (d40c->runtime_addr)
2046 dst_dev_addr = d40c->runtime_addr;
2048 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
2053 res = d40_phy_sg_to_lli(sgl,
2057 virt_to_phys(d40d->lli_phy.src),
2059 d40c->dma_cfg.src_info.data_width,
2060 d40c->dma_cfg.dst_info.data_width,
2061 d40c->dma_cfg.src_info.psize);
2065 res = d40_phy_sg_to_lli(sgl,
2069 virt_to_phys(d40d->lli_phy.dst),
2071 d40c->dma_cfg.dst_info.data_width,
2072 d40c->dma_cfg.src_info.data_width,
2073 d40c->dma_cfg.dst_info.psize);
2077 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
2078 d40d->lli_pool.size, DMA_TO_DEVICE);
2082 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2083 struct scatterlist *sgl,
2084 unsigned int sg_len,
2085 enum dma_data_direction direction,
2086 unsigned long dma_flags)
2088 struct d40_desc *d40d;
2089 struct d40_chan *d40c = container_of(chan, struct d40_chan,
2091 unsigned long flags;
2094 if (d40c->phy_chan == NULL) {
2095 chan_err(d40c, "Cannot prepare unallocated channel\n");
2096 return ERR_PTR(-EINVAL);
2099 spin_lock_irqsave(&d40c->lock, flags);
2100 d40d = d40_desc_get(d40c);
2105 if (chan_is_logical(d40c))
2106 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
2107 direction, dma_flags);
2109 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
2110 direction, dma_flags);
2112 chan_err(d40c, "Failed to prepare %s slave sg job: %d\n",
2113 chan_is_logical(d40c) ? "log" : "phy", err);
2117 d40d->txd.flags = dma_flags;
2119 dma_async_tx_descriptor_init(&d40d->txd, chan);
2121 d40d->txd.tx_submit = d40_tx_submit;
2123 spin_unlock_irqrestore(&d40c->lock, flags);
2128 d40_desc_free(d40c, d40d);
2129 spin_unlock_irqrestore(&d40c->lock, flags);
2133 static enum dma_status d40_tx_status(struct dma_chan *chan,
2134 dma_cookie_t cookie,
2135 struct dma_tx_state *txstate)
2137 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2138 dma_cookie_t last_used;
2139 dma_cookie_t last_complete;
2142 if (d40c->phy_chan == NULL) {
2143 chan_err(d40c, "Cannot read status of unallocated channel\n");
2147 last_complete = d40c->completed;
2148 last_used = chan->cookie;
2150 if (d40_is_paused(d40c))
2153 ret = dma_async_is_complete(cookie, last_complete, last_used);
2155 dma_set_tx_state(txstate, last_complete, last_used,
2156 stedma40_residue(chan));
2161 static void d40_issue_pending(struct dma_chan *chan)
2163 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2164 unsigned long flags;
2166 if (d40c->phy_chan == NULL) {
2167 chan_err(d40c, "Channel is not allocated!\n");
2171 spin_lock_irqsave(&d40c->lock, flags);
2173 /* Busy means that pending jobs are already being processed */
2175 (void) d40_queue_start(d40c);
2177 spin_unlock_irqrestore(&d40c->lock, flags);
2180 /* Runtime reconfiguration extension */
2181 static void d40_set_runtime_config(struct dma_chan *chan,
2182 struct dma_slave_config *config)
2184 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2185 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2186 enum dma_slave_buswidth config_addr_width;
2187 dma_addr_t config_addr;
2188 u32 config_maxburst;
2189 enum stedma40_periph_data_width addr_width;
2192 if (config->direction == DMA_FROM_DEVICE) {
2193 dma_addr_t dev_addr_rx =
2194 d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2196 config_addr = config->src_addr;
2198 dev_dbg(d40c->base->dev,
2199 "channel has a pre-wired RX address %08x "
2200 "overriding with %08x\n",
2201 dev_addr_rx, config_addr);
2202 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2203 dev_dbg(d40c->base->dev,
2204 "channel was not configured for peripheral "
2205 "to memory transfer (%d) overriding\n",
2207 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2209 config_addr_width = config->src_addr_width;
2210 config_maxburst = config->src_maxburst;
2212 } else if (config->direction == DMA_TO_DEVICE) {
2213 dma_addr_t dev_addr_tx =
2214 d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2216 config_addr = config->dst_addr;
2218 dev_dbg(d40c->base->dev,
2219 "channel has a pre-wired TX address %08x "
2220 "overriding with %08x\n",
2221 dev_addr_tx, config_addr);
2222 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2223 dev_dbg(d40c->base->dev,
2224 "channel was not configured for memory "
2225 "to peripheral transfer (%d) overriding\n",
2227 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2229 config_addr_width = config->dst_addr_width;
2230 config_maxburst = config->dst_maxburst;
2233 dev_err(d40c->base->dev,
2234 "unrecognized channel direction %d\n",
2239 switch (config_addr_width) {
2240 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2241 addr_width = STEDMA40_BYTE_WIDTH;
2243 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2244 addr_width = STEDMA40_HALFWORD_WIDTH;
2246 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2247 addr_width = STEDMA40_WORD_WIDTH;
2249 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2250 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2253 dev_err(d40c->base->dev,
2254 "illegal peripheral address width "
2256 config->src_addr_width);
2260 if (chan_is_logical(d40c)) {
2261 if (config_maxburst >= 16)
2262 psize = STEDMA40_PSIZE_LOG_16;
2263 else if (config_maxburst >= 8)
2264 psize = STEDMA40_PSIZE_LOG_8;
2265 else if (config_maxburst >= 4)
2266 psize = STEDMA40_PSIZE_LOG_4;
2268 psize = STEDMA40_PSIZE_LOG_1;
2270 if (config_maxburst >= 16)
2271 psize = STEDMA40_PSIZE_PHY_16;
2272 else if (config_maxburst >= 8)
2273 psize = STEDMA40_PSIZE_PHY_8;
2274 else if (config_maxburst >= 4)
2275 psize = STEDMA40_PSIZE_PHY_4;
2276 else if (config_maxburst >= 2)
2277 psize = STEDMA40_PSIZE_PHY_2;
2279 psize = STEDMA40_PSIZE_PHY_1;
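/* e.g. a requested maxburst of 12 is rounded down to a burst of 8 above */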
2282 /* Set up all the endpoint configs */
2283 cfg->src_info.data_width = addr_width;
2284 cfg->src_info.psize = psize;
2285 cfg->src_info.big_endian = false;
2286 cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2287 cfg->dst_info.data_width = addr_width;
2288 cfg->dst_info.psize = psize;
2289 cfg->dst_info.big_endian = false;
2290 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2292 /* Fill in register values */
2293 if (chan_is_logical(d40c))
2294 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2296 d40_phy_cfg(cfg, &d40c->src_def_cfg,
2297 &d40c->dst_def_cfg, false);
2299 /* These settings will take precedence later */
2300 d40c->runtime_addr = config_addr;
2301 d40c->runtime_direction = config->direction;
2302 dev_dbg(d40c->base->dev,
2303 "configured channel %s for %s, data width %d, "
2304 "maxburst %d bytes, LE, no flow control\n",
2305 dma_chan_name(chan),
2306 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
2311 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2314 unsigned long flags;
2315 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2317 if (d40c->phy_chan == NULL) {
2318 chan_err(d40c, "Channel is not allocated!\n");
2323 case DMA_TERMINATE_ALL:
2324 spin_lock_irqsave(&d40c->lock, flags);
2326 spin_unlock_irqrestore(&d40c->lock, flags);
2329 return d40_pause(chan);
2331 return d40_resume(chan);
2332 case DMA_SLAVE_CONFIG:
2333 d40_set_runtime_config(chan,
2334 (struct dma_slave_config *) arg);
2340 /* Other commands are unimplemented */
2344 /* Initialization functions */
2346 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2347 struct d40_chan *chans, int offset,
2351 struct d40_chan *d40c;
2353 INIT_LIST_HEAD(&dma->channels);
2355 for (i = offset; i < offset + num_chans; i++) {
2358 d40c->chan.device = dma;
2360 spin_lock_init(&d40c->lock);
2362 d40c->log_num = D40_PHY_CHAN;
2364 INIT_LIST_HEAD(&d40c->active);
2365 INIT_LIST_HEAD(&d40c->queue);
2366 INIT_LIST_HEAD(&d40c->client);
2368 tasklet_init(&d40c->tasklet, dma_tasklet,
2369 (unsigned long) d40c);
2371 list_add_tail(&d40c->chan.device_node,
2376 static int __init d40_dmaengine_init(struct d40_base *base,
2377 int num_reserved_chans)
2381 d40_chan_init(base, &base->dma_slave, base->log_chans,
2382 0, base->num_log_chans);
2384 dma_cap_zero(base->dma_slave.cap_mask);
2385 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2387 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2388 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2389 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2390 base->dma_slave.device_prep_dma_sg = d40_prep_sg;
2391 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2392 base->dma_slave.device_tx_status = d40_tx_status;
2393 base->dma_slave.device_issue_pending = d40_issue_pending;
2394 base->dma_slave.device_control = d40_control;
2395 base->dma_slave.dev = base->dev;
2397 err = dma_async_device_register(&base->dma_slave);
2400 d40_err(base->dev, "Failed to register slave channels\n");
2404 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2405 base->num_log_chans, base->plat_data->memcpy_len);
2407 dma_cap_zero(base->dma_memcpy.cap_mask);
2408 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2409 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2411 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2412 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2413 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2414 base->dma_memcpy.device_prep_dma_sg = d40_prep_sg;
2415 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2416 base->dma_memcpy.device_tx_status = d40_tx_status;
2417 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2418 base->dma_memcpy.device_control = d40_control;
2419 base->dma_memcpy.dev = base->dev;
2421 * This controller can only access addresses at even
2422 * 32-bit boundaries, i.e. 2^2.
2424 base->dma_memcpy.copy_align = 2;
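/*
 * copy_align is the log2 of the required alignment, so 2 here means
 * clients must keep memcpy addresses and lengths 4-byte aligned.
 */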
2426 err = dma_async_device_register(&base->dma_memcpy);
2430 "Failed to regsiter memcpy only channels\n");
2434 d40_chan_init(base, &base->dma_both, base->phy_chans,
2435 0, num_reserved_chans);
2437 dma_cap_zero(base->dma_both.cap_mask);
2438 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2439 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2440 dma_cap_set(DMA_SG, base->dma_both.cap_mask);
2442 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2443 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2444 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2445 base->dma_both.device_prep_dma_sg = d40_prep_sg;
2446 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2447 base->dma_both.device_tx_status = d40_tx_status;
2448 base->dma_both.device_issue_pending = d40_issue_pending;
2449 base->dma_both.device_control = d40_control;
2450 base->dma_both.dev = base->dev;
2451 base->dma_both.copy_align = 2;
2452 err = dma_async_device_register(&base->dma_both);
2456 "Failed to register logical and physical capable channels\n");
2461 dma_async_device_unregister(&base->dma_memcpy);
2463 dma_async_device_unregister(&base->dma_slave);
2468 /* Initialization functions. */
2470 static int __init d40_phy_res_init(struct d40_base *base)
2473 int num_phy_chans_avail = 0;
2475 int odd_even_bit = -2;
2477 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2478 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2480 for (i = 0; i < base->num_phy_chans; i++) {
2481 base->phy_res[i].num = i;
2482 odd_even_bit += 2 * ((i % 2) == 0);
2483 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2484 /* Mark security only channels as occupied */
2485 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2486 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2488 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2489 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2490 num_phy_chans_avail++;
2492 spin_lock_init(&base->phy_res[i].lock);
2495 /* Mark disabled channels as occupied */
2496 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2497 int chan = base->plat_data->disabled_channels[i];
2499 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
2500 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
2501 num_phy_chans_avail--;
2504 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2505 num_phy_chans_avail, base->num_phy_chans);
2507 /* Verify settings extended vs standard */
2508 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
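/*
 * PRTYP holds 2 type bits per physical channel; channels that are free
 * for our use are expected to report the standard type (0x1).
 */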
2510 for (i = 0; i < base->num_phy_chans; i++) {
2512 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2513 (val[0] & 0x3) != 1)
2515 "[%s] INFO: channel %d is misconfigured (%d)\n",
2516 __func__, i, val[0] & 0x3);
2518 val[0] = val[0] >> 2;
2521 return num_phy_chans_avail;
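/*
 * Read and verify the PrimeCell peripheral/cell ID registers, then size
 * and allocate the driver state from what the hardware reports.
 */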
2524 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2526 static const struct d40_reg_val dma_id_regs[] = {
2528 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2529 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2531 * D40_DREG_PERIPHID2 Depends on HW revision:
2532 * DB8500ed has 0x0008,
2534 * DB8500v1 has 0x0028
2535 * DB8500v2 has 0x0038
2537 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2540 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2541 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2542 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2543 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2545 struct stedma40_platform_data *plat_data;
2546 struct clk *clk = NULL;
2547 void __iomem *virtbase = NULL;
2548 struct resource *res = NULL;
2549 struct d40_base *base = NULL;
2550 int num_log_chans = 0;
2556 clk = clk_get(&pdev->dev, NULL);
2559 d40_err(&pdev->dev, "No matching clock found\n");
2565 /* Get IO for DMAC base address */
2566 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2570 if (request_mem_region(res->start, resource_size(res),
2571 D40_NAME " I/O base") == NULL)
2574 virtbase = ioremap(res->start, resource_size(res));
2578 /* HW version check */
2579 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2580 if (dma_id_regs[i].val !=
2581 readl(virtbase + dma_id_regs[i].reg)) {
2583 "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2586 readl(virtbase + dma_id_regs[i].reg));
2591 /* Get silicon revision and designer */
2592 val = readl(virtbase + D40_DREG_PERIPHID2);
2594 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
2596 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
2597 val & D40_DREG_PERIPHID2_DESIGNER_MASK,
2602 rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
2603 D40_DREG_PERIPHID2_REV_POS;
2605 /* The number of physical channels on this HW */
2606 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
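/* e.g. an ICFG field value of 1 gives 4 * 1 + 4 = 8 physical channels */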
2608 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2611 plat_data = pdev->dev.platform_data;
2613 /* Count the number of logical channels in use */
2614 for (i = 0; i < plat_data->dev_len; i++)
2615 if (plat_data->dev_rx[i] != 0)
2618 for (i = 0; i < plat_data->dev_len; i++)
2619 if (plat_data->dev_tx[i] != 0)
2622 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2623 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2624 sizeof(struct d40_chan), GFP_KERNEL);
2627 d40_err(&pdev->dev, "Out of memory\n");
2633 base->num_phy_chans = num_phy_chans;
2634 base->num_log_chans = num_log_chans;
2635 base->phy_start = res->start;
2636 base->phy_size = resource_size(res);
2637 base->virtbase = virtbase;
2638 base->plat_data = plat_data;
2639 base->dev = &pdev->dev;
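/*
 * The d40_chan array (physical, logical and memcpy channels) was
 * allocated together with struct d40_base above; carve the phy_chans
 * and log_chans pointers out of that single block.
 */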
2640 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2641 base->log_chans = &base->phy_chans[num_phy_chans];
2643 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2648 base->lookup_phy_chans = kzalloc(num_phy_chans *
2649 sizeof(struct d40_chan *),
2651 if (!base->lookup_phy_chans)
2654 if (num_log_chans + plat_data->memcpy_len) {
2656 * The max number of logical channels equals the number of event
2657 * lines for all src devices plus all dst devices.
2659 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2660 sizeof(struct d40_chan *),
2662 if (!base->lookup_log_chans)
2666 base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
2667 sizeof(struct d40_desc *) *
2668 D40_LCLA_LINK_PER_EVENT_GRP,
2670 if (!base->lcla_pool.alloc_map)
2673 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2674 0, SLAB_HWCACHE_ALIGN,
2676 if (base->desc_slab == NULL)
2689 release_mem_region(res->start,
2690 resource_size(res));
2695 kfree(base->lcla_pool.alloc_map);
2696 kfree(base->lookup_log_chans);
2697 kfree(base->lookup_phy_chans);
2698 kfree(base->phy_res);
2705 static void __init d40_hw_init(struct d40_base *base)
2708 static const struct d40_reg_val dma_init_reg[] = {
2709 /* Clock every part of the DMA block from start */
2710 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2712 /* Interrupts on all logical channels */
2713 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2714 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2715 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2716 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2717 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2718 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2719 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2720 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2721 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2722 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2723 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2724 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2727 u32 prmseo[2] = {0, 0};
2728 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2732 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2733 writel(dma_init_reg[i].val,
2734 base->virtbase + dma_init_reg[i].reg);
2736 /* Configure all our dma channels to default settings */
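/*
 * The mode and active settings are packed 2 bits per channel, split
 * across even/odd registers, while the interrupt mask and clear
 * registers get one bit per channel.
 */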
2737 for (i = 0; i < base->num_phy_chans; i++) {
2739 activeo[i % 2] = activeo[i % 2] << 2;
2741 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2743 activeo[i % 2] |= 3;
2747 /* Enable interrupt for this physical channel */
2748 pcmis = (pcmis << 1) | 1;
2750 /* Clear interrupt for this physical channel */
2751 pcicr = (pcicr << 1) | 1;
2753 /* Set channel to physical mode */
2754 prmseo[i % 2] = prmseo[i % 2] << 2;
2759 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2760 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2761 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2762 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2764 /* Write which interrupt to enable */
2765 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2767 /* Write which interrupt to clear */
2768 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2772 static int __init d40_lcla_allocate(struct d40_base *base)
2774 unsigned long *page_list;
2779 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned.
2780 * To fulfill this hardware requirement without wasting 256 KB,
2781 * we allocate pages until we get an aligned one.
2783 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
2791 /* Calculate how many pages are required */
2792 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
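/*
 * Keep allocating blocks until one has a physical address aligned to
 * LCLA_ALIGNMENT; rejected blocks are held on to (so retries return
 * new addresses) and freed once the loop is done.
 */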
2794 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
2795 page_list[i] = __get_free_pages(GFP_KERNEL,
2796 base->lcla_pool.pages);
2797 if (!page_list[i]) {
2799 d40_err(base->dev, "Failed to allocate %d pages.\n",
2800 base->lcla_pool.pages);
2802 for (j = 0; j < i; j++)
2803 free_pages(page_list[j], base->lcla_pool.pages);
2807 if ((virt_to_phys((void *)page_list[i]) &
2808 (LCLA_ALIGNMENT - 1)) == 0)
2812 for (j = 0; j < i; j++)
2813 free_pages(page_list[j], base->lcla_pool.pages);
2815 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
2816 base->lcla_pool.base = (void *)page_list[i];
2819 * After many attempts with no success in finding the correct
2820 * alignment, fall back to allocating one large buffer.
2823 "[%s] Failed to get %d pages @ 18 bit align.\n",
2824 __func__, base->lcla_pool.pages);
2825 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
2826 base->num_phy_chans +
2829 if (!base->lcla_pool.base_unaligned) {
2834 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
2838 writel(virt_to_phys(base->lcla_pool.base),
2839 base->virtbase + D40_DREG_LCLA);
2845 static int __init d40_probe(struct platform_device *pdev)
2849 struct d40_base *base;
2850 struct resource *res = NULL;
2851 int num_reserved_chans;
2854 base = d40_hw_detect_init(pdev);
2859 num_reserved_chans = d40_phy_res_init(base);
2861 platform_set_drvdata(pdev, base);
2863 spin_lock_init(&base->interrupt_lock);
2864 spin_lock_init(&base->execmd_lock);
2866 /* Get IO for logical channel parameter address */
2867 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2870 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
2873 base->lcpa_size = resource_size(res);
2874 base->phy_lcpa = res->start;
2876 if (request_mem_region(res->start, resource_size(res),
2877 D40_NAME " I/O lcpa") == NULL) {
2880 "Failed to request LCPA region 0x%x-0x%x\n",
2881 res->start, res->end);
2885 /* We make use of ESRAM memory for the LCPA. */
2886 val = readl(base->virtbase + D40_DREG_LCPA);
2887 if (res->start != val && val != 0) {
2888 dev_warn(&pdev->dev,
2889 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2890 __func__, val, res->start);
2892 writel(res->start, base->virtbase + D40_DREG_LCPA);
2894 base->lcpa_base = ioremap(res->start, resource_size(res));
2895 if (!base->lcpa_base) {
2897 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
2901 ret = d40_lcla_allocate(base);
2903 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
2907 spin_lock_init(&base->lcla_pool.lock);
2909 base->irq = platform_get_irq(pdev, 0);
2911 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2913 d40_err(&pdev->dev, "No IRQ defined\n");
2917 err = d40_dmaengine_init(base, num_reserved_chans);
2923 dev_info(base->dev, "initialized\n");
2928 if (base->desc_slab)
2929 kmem_cache_destroy(base->desc_slab);
2931 iounmap(base->virtbase);
2932 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2933 free_pages((unsigned long)base->lcla_pool.base,
2934 base->lcla_pool.pages);
2936 kfree(base->lcla_pool.base_unaligned);
2939 release_mem_region(base->phy_lcpa,
2941 if (base->phy_start)
2942 release_mem_region(base->phy_start,
2945 clk_disable(base->clk);
2949 kfree(base->lcla_pool.alloc_map);
2950 kfree(base->lookup_log_chans);
2951 kfree(base->lookup_phy_chans);
2952 kfree(base->phy_res);
2956 d40_err(&pdev->dev, "probe failed\n");
2960 static struct platform_driver d40_driver = {
2962 .owner = THIS_MODULE,
2967 static int __init stedma40_init(void)
2969 return platform_driver_probe(&d40_driver, d40_probe);
2971 arch_initcall(stedma40_init);
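/*
 * Registered at arch_initcall time, presumably so DMA channels are
 * available before the client drivers that depend on them probe.
 */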