2 * Copyright (C) Ericsson AB 2007-2008
3 * Copyright (C) ST-Ericsson SA 2008-2010
4 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
5 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
6 * License terms: GNU General Public License (GPL) version 2
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/dmaengine.h>
12 #include <linux/platform_device.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/err.h>
17 #include <plat/ste_dma40.h>
19 #include "ste_dma40_ll.h"
21 #define D40_NAME "dma40"
23 #define D40_PHY_CHAN -1
25 /* For masking out/in 2-bit channel positions */
26 #define D40_CHAN_POS(chan) (2 * (chan / 2))
27 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
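/*
 * Illustrative note (not from the original source): each physical channel
 * occupies a 2-bit field, and even/odd channels live in separate registers
 * (e.g. ACTIVE vs ACTIVO below), so an even/odd channel pair shares the
 * same bit position:
 *
 *	D40_CHAN_POS(6)      == 2 * (6 / 2) == 6	(bits 7:6)
 *	D40_CHAN_POS(7)      == 6			(same position, odd register)
 *	D40_CHAN_POS_MASK(6) == 0x3 << 6   == 0xc0
 */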
29 /* Maximum iterations taken before giving up suspending a channel */
30 #define D40_SUSPEND_MAX_IT 500
32 /* Hardware requirement on LCLA alignment */
33 #define LCLA_ALIGNMENT 0x40000
35 /* Max number of links per event group */
36 #define D40_LCLA_LINK_PER_EVENT_GRP 128
37 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
39 /* Attempts made before giving up on trying to get pages that are aligned */
40 #define MAX_LCLA_ALLOC_ATTEMPTS 256
42 /* Bit markings for allocation map */
43 #define D40_ALLOC_FREE (1 << 31)
44 #define D40_ALLOC_PHY (1 << 30)
45 #define D40_ALLOC_LOG_FREE 0
47 /* Hardware designer of the block */
48 #define D40_HW_DESIGNER 0x8
51 * enum d40_command - The different commands and/or statuses.
53 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
54 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
55 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
56 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
61 D40_DMA_SUSPEND_REQ = 2,
66 * struct d40_lli_pool - Structure for keeping LLIs in memory
68 * @base: Pointer to memory area when the pre_alloc_lli's are not large
69 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
70 * pre_alloc_lli is used.
71 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
72 * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
73 * one buffer to one buffer.
78 /* Space for dst and src, plus an extra for padding */
79 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
83 * struct d40_desc - A descriptor is one DMA job.
85 * @lli_phy: LLI settings for physical channel. Both src and dst
86 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
88 * @lli_log: Same as above but for logical channels.
89 * @lli_pool: The pool with two entries pre-allocated.
90 * @lli_len: Number of llis of current descriptor.
91 * @lli_current: Number of transferred llis.
92 * @lcla_alloc: Number of LCLA entries allocated.
93 * @txd: DMA engine struct. Used, among other things, for communication
96 * @is_in_client_list: true if the client owns this descriptor.
99 * This descriptor is used for both logical and physical transfers.
103 struct d40_phy_lli_bidir lli_phy;
105 struct d40_log_lli_bidir lli_log;
107 struct d40_lli_pool lli_pool;
112 struct dma_async_tx_descriptor txd;
113 struct list_head node;
115 bool is_in_client_list;
119 * struct d40_lcla_pool - LCLA pool settings and data.
121 * @base: The virtual address of LCLA. 18 bit aligned.
122 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
123 * This pointer is only there for clean-up on error.
124 * @pages: The number of pages needed for all physical channels.
125 * Only used later for clean-up on error
126 * @lock: Lock to protect the content in this struct.
127 * @alloc_map: Big map of which LCLA entry is owned by which job.
129 struct d40_lcla_pool {
131 void *base_unaligned;
134 struct d40_desc **alloc_map;
138 * struct d40_phy_res - struct for handling event lines mapped to physical
141 * @lock: A lock protecting this entity.
142 * @num: The physical channel number of this entity.
143 * @allocated_src: Bit mapped to show which src event lines are mapped to
144 * this physical channel. Can also be free or physically allocated.
145 * @allocated_dst: Same as for src but for dst.
146 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
159 * struct d40_chan - Struct that describes a channel.
161 * @lock: A spinlock to protect this struct.
162 * @log_num: The logical number, if any, of this channel.
163 * @completed: Starts at 1; after the first interrupt it is set to the dma engine's
165 * @pending_tx: The number of pending transfers. Used between interrupt handler
167 * @busy: Set to true when transfer is ongoing on this channel.
168 * @phy_chan: Pointer to physical channel which this instance runs on. If this
169 * pointer is NULL, then the channel is not allocated.
170 * @chan: DMA engine handle.
171 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
172 * transfer and call client callback.
173 * @client: Client owned descriptor list.
174 * @active: Active descriptor.
175 * @queue: Queued jobs.
176 * @dma_cfg: The client configuration of this dma channel.
177 * @configured: whether the dma_cfg configuration is valid
178 * @base: Pointer to the device instance struct.
179 * @src_def_cfg: Default cfg register setting for src.
180 * @dst_def_cfg: Default cfg register setting for dst.
181 * @log_def: Default logical channel settings.
182 * @lcla: Space for one dst/src pair for logical channel transfers.
183 * @lcpa: Pointer to dst and src lcpa settings.
185 * This struct can either "be" a logical or a physical channel.
190 /* ID of the most recent completed transfer */
194 struct d40_phy_res *phy_chan;
195 struct dma_chan chan;
196 struct tasklet_struct tasklet;
197 struct list_head client;
198 struct list_head active;
199 struct list_head queue;
200 struct stedma40_chan_cfg dma_cfg;
202 struct d40_base *base;
203 /* Default register configurations */
206 struct d40_def_lcsp log_def;
207 struct d40_log_lli_full *lcpa;
208 /* Runtime reconfiguration */
209 dma_addr_t runtime_addr;
210 enum dma_data_direction runtime_direction;
214 * struct d40_base - The big global struct, one for each probe'd instance.
216 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
217 * @execmd_lock: Lock for execute command usage since several channels share
218 * the same physical register.
219 * @dev: The device structure.
220 * @virtbase: The virtual base address of the DMA's registers.
221 * @rev: silicon revision detected.
222 * @clk: Pointer to the DMA clock structure.
223 * @phy_start: Physical memory start of the DMA registers.
224 * @phy_size: Size of the DMA register map.
225 * @irq: The IRQ number.
226 * @num_phy_chans: The number of physical channels. Read from HW. This
227 * is the number of available channels for this driver, not counting "Secure
228 * mode" allocated physical channels.
229 * @num_log_chans: The number of logical channels. Calculated from
231 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
232 * @dma_slave: dma_device channels that can only do slave transfers.
233 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
234 * @log_chans: Room for all possible logical channels in system.
235 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
236 * to log_chans entries.
237 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
238 * to phy_chans entries.
239 * @plat_data: Pointer to provided platform_data which is the driver
241 * @phy_res: Vector containing all physical channels.
242 * @lcla_pool: lcla pool settings and data.
243 * @lcpa_base: The virtual mapped address of LCPA.
244 * @phy_lcpa: The physical address of the LCPA.
245 * @lcpa_size: The size of the LCPA area.
246 * @desc_slab: cache for descriptors.
249 spinlock_t interrupt_lock;
250 spinlock_t execmd_lock;
252 void __iomem *virtbase;
255 phys_addr_t phy_start;
256 resource_size_t phy_size;
260 struct dma_device dma_both;
261 struct dma_device dma_slave;
262 struct dma_device dma_memcpy;
263 struct d40_chan *phy_chans;
264 struct d40_chan *log_chans;
265 struct d40_chan **lookup_log_chans;
266 struct d40_chan **lookup_phy_chans;
267 struct stedma40_platform_data *plat_data;
268 /* Physical half channels */
269 struct d40_phy_res *phy_res;
270 struct d40_lcla_pool lcla_pool;
273 resource_size_t lcpa_size;
274 struct kmem_cache *desc_slab;
278 * struct d40_interrupt_lookup - lookup table for interrupt handler
280 * @src: Interrupt mask register.
281 * @clr: Interrupt clear register.
282 * @is_error: true if this is an error interrupt.
283 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
284 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
286 struct d40_interrupt_lookup {
294 * struct d40_reg_val - simple lookup struct
296 * @reg: The register.
297 * @val: The value that belongs to the register in reg.
304 static struct device *chan2dev(struct d40_chan *d40c)
306 return &d40c->chan.dev->device;
309 static bool chan_is_physical(struct d40_chan *chan)
311 return chan->log_num == D40_PHY_CHAN;
314 static bool chan_is_logical(struct d40_chan *chan)
316 return !chan_is_physical(chan);
319 static void __iomem *chan_base(struct d40_chan *chan)
321 return chan->base->virtbase + D40_DREG_PCBASE +
322 chan->phy_chan->num * D40_DREG_PCDELTA;
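/*
 * Usage sketch (assumption based on how chan_base() is used below): each
 * physical channel owns a register window of D40_DREG_PCDELTA bytes starting
 * at D40_DREG_PCBASE, so a per-channel register is reached as, e.g.:
 *
 *	val = readl(chan_base(chan) + D40_CHAN_REG_SSLNK);
 */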
325 #define d40_err(dev, format, arg...) \
326 dev_err(dev, "[%s] " format, __func__, ## arg)
328 #define chan_err(d40c, format, arg...) \
329 d40_err(chan2dev(d40c), format, ## arg)
331 static int d40_pool_lli_alloc(struct d40_desc *d40d,
332 int lli_len, bool is_log)
338 align = sizeof(struct d40_log_lli);
340 align = sizeof(struct d40_phy_lli);
343 base = d40d->lli_pool.pre_alloc_lli;
344 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
345 d40d->lli_pool.base = NULL;
347 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
349 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
350 d40d->lli_pool.base = base;
352 if (d40d->lli_pool.base == NULL)
357 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
359 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
362 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
364 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
371 static void d40_pool_lli_free(struct d40_desc *d40d)
373 kfree(d40d->lli_pool.base);
374 d40d->lli_pool.base = NULL;
375 d40d->lli_pool.size = 0;
376 d40d->lli_log.src = NULL;
377 d40d->lli_log.dst = NULL;
378 d40d->lli_phy.src = NULL;
379 d40d->lli_phy.dst = NULL;
382 static int d40_lcla_alloc_one(struct d40_chan *d40c,
383 struct d40_desc *d40d)
390 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
392 p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
395 * Allocate both src and dst at the same time; therefore the half
396 * starts at 1, since 0 can't be used as it is the end marker.
398 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
399 if (!d40c->base->lcla_pool.alloc_map[p + i]) {
400 d40c->base->lcla_pool.alloc_map[p + i] = d40d;
407 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
412 static int d40_lcla_free_all(struct d40_chan *d40c,
413 struct d40_desc *d40d)
419 if (chan_is_physical(d40c))
422 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
424 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
425 if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
426 D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
427 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
428 D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
430 if (d40d->lcla_alloc == 0) {
437 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
443 static void d40_desc_remove(struct d40_desc *d40d)
445 list_del(&d40d->node);
448 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
450 struct d40_desc *desc = NULL;
452 if (!list_empty(&d40c->client)) {
456 list_for_each_entry_safe(d, _d, &d40c->client, node)
457 if (async_tx_test_ack(&d->txd)) {
458 d40_pool_lli_free(d);
461 memset(desc, 0, sizeof(*desc));
467 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
470 INIT_LIST_HEAD(&desc->node);
475 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
478 d40_lcla_free_all(d40c, d40d);
479 kmem_cache_free(d40c->base->desc_slab, d40d);
482 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
484 list_add_tail(&desc->node, &d40c->active);
487 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
489 int curr_lcla = -EINVAL, next_lcla;
491 if (chan_is_physical(d40c)) {
492 d40_phy_lli_write(d40c->base->virtbase,
496 d40d->lli_current = d40d->lli_len;
499 if ((d40d->lli_len - d40d->lli_current) > 1)
500 curr_lcla = d40_lcla_alloc_one(d40c, d40d);
502 d40_log_lli_lcpa_write(d40c->lcpa,
503 &d40d->lli_log.dst[d40d->lli_current],
504 &d40d->lli_log.src[d40d->lli_current],
508 for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
509 struct d40_log_lli *lcla;
511 if (d40d->lli_current + 1 < d40d->lli_len)
512 next_lcla = d40_lcla_alloc_one(d40c, d40d);
516 lcla = d40c->base->lcla_pool.base +
517 d40c->phy_chan->num * 1024 +
520 d40_log_lli_lcla_write(lcla,
521 &d40d->lli_log.dst[d40d->lli_current],
522 &d40d->lli_log.src[d40d->lli_current],
525 (void) dma_map_single(d40c->base->dev, lcla,
526 2 * sizeof(struct d40_log_lli),
529 curr_lcla = next_lcla;
531 if (curr_lcla == -EINVAL) {
540 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
544 if (list_empty(&d40c->active))
547 d = list_first_entry(&d40c->active,
553 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
555 list_add_tail(&desc->node, &d40c->queue);
558 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
562 if (list_empty(&d40c->queue))
565 d = list_first_entry(&d40c->queue,
571 static int d40_psize_2_burst_size(bool is_log, int psize)
574 if (psize == STEDMA40_PSIZE_LOG_1)
577 if (psize == STEDMA40_PSIZE_PHY_1)
585 * The dma only supports transferring chunks of up to
586 * STEDMA40_MAX_SEG_SIZE << data_width bytes. Calculate the total number of
587 * dma elements required to send the entire sg list.
589 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
592 u32 max_w = max(data_width1, data_width2);
593 u32 min_w = min(data_width1, data_width2);
594 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
596 if (seg_max > STEDMA40_MAX_SEG_SIZE)
597 seg_max -= (1 << max_w);
599 if (!IS_ALIGNED(size, 1 << max_w))
605 dmalen = size / seg_max;
606 if (dmalen * seg_max < size)
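/*
 * Worked example (illustrative, not from the original source): with a byte
 * wide source and a word wide destination, max_w selects the word width, so
 * size must be a multiple of 4 bytes; the job is then split into
 * ceil(size / seg_max) dma elements, e.g. size == 3 * seg_max + 4 gives
 * dmalen == 4.
 */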
612 static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
613 u32 data_width1, u32 data_width2)
615 struct scatterlist *sg;
620 for_each_sg(sgl, sg, sg_len, i) {
621 ret = d40_size_2_dmalen(sg_dma_len(sg),
622 data_width1, data_width2);
630 /* Support functions for logical channels */
632 static int d40_channel_execute_command(struct d40_chan *d40c,
633 enum d40_command command)
637 void __iomem *active_reg;
642 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
644 if (d40c->phy_chan->num % 2 == 0)
645 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
647 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
649 if (command == D40_DMA_SUSPEND_REQ) {
650 status = (readl(active_reg) &
651 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
652 D40_CHAN_POS(d40c->phy_chan->num);
654 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
658 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
659 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
662 if (command == D40_DMA_SUSPEND_REQ) {
664 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
665 status = (readl(active_reg) &
666 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
667 D40_CHAN_POS(d40c->phy_chan->num);
671 * Reduce the number of bus accesses while
672 * waiting for the DMA to suspend.
676 if (status == D40_DMA_STOP ||
677 status == D40_DMA_SUSPENDED)
681 if (i == D40_SUSPEND_MAX_IT) {
683 "unable to suspend the chl %d (log: %d) status %x\n",
684 d40c->phy_chan->num, d40c->log_num,
692 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
696 static void d40_term_all(struct d40_chan *d40c)
698 struct d40_desc *d40d;
700 /* Release active descriptors */
701 while ((d40d = d40_first_active_get(d40c))) {
702 d40_desc_remove(d40d);
703 d40_desc_free(d40c, d40d);
706 /* Release queued descriptors waiting for transfer */
707 while ((d40d = d40_first_queued(d40c))) {
708 d40_desc_remove(d40d);
709 d40_desc_free(d40c, d40d);
713 d40c->pending_tx = 0;
717 static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
720 void __iomem *addr = chan_base(d40c) + reg;
724 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
725 | ~D40_EVENTLINE_MASK(event), addr);
730 * The hardware sometimes doesn't register the enable when src and dst
731 * event lines are active on the same logical channel. Retry to ensure
732 * it does. Usually only one retry is sufficient.
736 writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
737 | ~D40_EVENTLINE_MASK(event), addr);
739 if (readl(addr) & D40_EVENTLINE_MASK(event))
744 dev_dbg(chan2dev(d40c),
745 "[%s] workaround enable S%cLNK (%d tries)\n",
746 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
752 static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
756 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
758 /* Enable event line connected to device (or memcpy) */
759 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
760 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
761 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
763 __d40_config_set_event(d40c, do_enable, event,
767 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
768 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
770 __d40_config_set_event(d40c, do_enable, event,
774 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
777 static u32 d40_chan_has_events(struct d40_chan *d40c)
779 void __iomem *chanbase = chan_base(d40c);
782 val = readl(chanbase + D40_CHAN_REG_SSLNK);
783 val |= readl(chanbase + D40_CHAN_REG_SDLNK);
788 static u32 d40_get_prmo(struct d40_chan *d40c)
790 static const unsigned int phy_map[] = {
791 [STEDMA40_PCHAN_BASIC_MODE]
792 = D40_DREG_PRMO_PCHAN_BASIC,
793 [STEDMA40_PCHAN_MODULO_MODE]
794 = D40_DREG_PRMO_PCHAN_MODULO,
795 [STEDMA40_PCHAN_DOUBLE_DST_MODE]
796 = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
798 static const unsigned int log_map[] = {
799 [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
800 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
801 [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
802 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
803 [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
804 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
807 if (chan_is_physical(d40c))
808 return phy_map[d40c->dma_cfg.mode_opt];
810 return log_map[d40c->dma_cfg.mode_opt];
813 static void d40_config_write(struct d40_chan *d40c)
818 /* Odd addresses are even addresses + 4 */
819 addr_base = (d40c->phy_chan->num % 2) * 4;
820 /* Setup channel mode to logical or physical */
821 var = ((u32)(chan_is_logical(d40c)) + 1) <<
822 D40_CHAN_POS(d40c->phy_chan->num);
823 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
825 /* Setup operational mode option register */
826 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
828 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
830 if (chan_is_logical(d40c)) {
831 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
832 & D40_SREG_ELEM_LOG_LIDX_MASK;
833 void __iomem *chanbase = chan_base(d40c);
835 /* Set default config for CFG reg */
836 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
837 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
839 /* Set LIDX for lcla */
840 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
841 writel(lidx, chanbase + D40_CHAN_REG_SDELT);
845 static u32 d40_residue(struct d40_chan *d40c)
849 if (chan_is_logical(d40c))
850 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
851 >> D40_MEM_LCSP2_ECNT_POS;
853 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
854 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
855 >> D40_SREG_ELEM_PHY_ECNT_POS;
858 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
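/*
 * Illustrative example: the ECNT field holds the number of hardware elements
 * still to be transferred, so 10 outstanding elements at word width
 * (1 << data_width == 4) report a residue of 40 bytes.
 */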
861 static bool d40_tx_is_linked(struct d40_chan *d40c)
865 if (chan_is_logical(d40c))
866 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
868 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
869 & D40_SREG_LNK_PHYS_LNK_MASK;
874 static int d40_pause(struct dma_chan *chan)
876 struct d40_chan *d40c =
877 container_of(chan, struct d40_chan, chan);
884 spin_lock_irqsave(&d40c->lock, flags);
886 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
888 if (chan_is_logical(d40c)) {
889 d40_config_set_event(d40c, false);
890 /* Resume the other logical channels if any */
891 if (d40_chan_has_events(d40c))
892 res = d40_channel_execute_command(d40c,
897 spin_unlock_irqrestore(&d40c->lock, flags);
901 static int d40_resume(struct dma_chan *chan)
903 struct d40_chan *d40c =
904 container_of(chan, struct d40_chan, chan);
911 spin_lock_irqsave(&d40c->lock, flags);
913 if (d40c->base->rev == 0)
914 if (chan_is_logical(d40c)) {
915 res = d40_channel_execute_command(d40c,
916 D40_DMA_SUSPEND_REQ);
920 /* If there are bytes left to transfer or a linked tx, resume the job */
921 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
923 if (chan_is_logical(d40c))
924 d40_config_set_event(d40c, true);
926 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
930 spin_unlock_irqrestore(&d40c->lock, flags);
934 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
936 struct d40_chan *d40c = container_of(tx->chan,
939 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
942 spin_lock_irqsave(&d40c->lock, flags);
946 if (d40c->chan.cookie < 0)
947 d40c->chan.cookie = 1;
949 d40d->txd.cookie = d40c->chan.cookie;
951 d40_desc_queue(d40c, d40d);
953 spin_unlock_irqrestore(&d40c->lock, flags);
958 static int d40_start(struct d40_chan *d40c)
960 if (d40c->base->rev == 0) {
963 if (chan_is_logical(d40c)) {
964 err = d40_channel_execute_command(d40c,
965 D40_DMA_SUSPEND_REQ);
971 if (chan_is_logical(d40c))
972 d40_config_set_event(d40c, true);
974 return d40_channel_execute_command(d40c, D40_DMA_RUN);
977 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
979 struct d40_desc *d40d;
982 /* Start queued jobs, if any */
983 d40d = d40_first_queued(d40c);
988 /* Remove from queue */
989 d40_desc_remove(d40d);
991 /* Add to active queue */
992 d40_desc_submit(d40c, d40d);
994 /* Initiate DMA job */
995 d40_desc_load(d40c, d40d);
998 err = d40_start(d40c);
1007 /* called from interrupt context */
1008 static void dma_tc_handle(struct d40_chan *d40c)
1010 struct d40_desc *d40d;
1012 /* Get first active entry from list */
1013 d40d = d40_first_active_get(d40c);
1018 d40_lcla_free_all(d40c, d40d);
1020 if (d40d->lli_current < d40d->lli_len) {
1021 d40_desc_load(d40c, d40d);
1023 (void) d40_start(d40c);
1027 if (d40_queue_start(d40c) == NULL)
1031 tasklet_schedule(&d40c->tasklet);
1035 static void dma_tasklet(unsigned long data)
1037 struct d40_chan *d40c = (struct d40_chan *) data;
1038 struct d40_desc *d40d;
1039 unsigned long flags;
1040 dma_async_tx_callback callback;
1041 void *callback_param;
1043 spin_lock_irqsave(&d40c->lock, flags);
1045 /* Get first active entry from list */
1046 d40d = d40_first_active_get(d40c);
1051 d40c->completed = d40d->txd.cookie;
1054 * If terminating a channel, pending_tx is set to zero.
1055 * This prevents any finished active jobs from returning to the client.
1057 if (d40c->pending_tx == 0) {
1058 spin_unlock_irqrestore(&d40c->lock, flags);
1062 /* Callback to client */
1063 callback = d40d->txd.callback;
1064 callback_param = d40d->txd.callback_param;
1066 if (async_tx_test_ack(&d40d->txd)) {
1067 d40_pool_lli_free(d40d);
1068 d40_desc_remove(d40d);
1069 d40_desc_free(d40c, d40d);
1071 if (!d40d->is_in_client_list) {
1072 d40_desc_remove(d40d);
1073 d40_lcla_free_all(d40c, d40d);
1074 list_add_tail(&d40d->node, &d40c->client);
1075 d40d->is_in_client_list = true;
1081 if (d40c->pending_tx)
1082 tasklet_schedule(&d40c->tasklet);
1084 spin_unlock_irqrestore(&d40c->lock, flags);
1086 if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
1087 callback(callback_param);
1092 /* Rescue maneuver if receiving double interrupts */
1093 if (d40c->pending_tx > 0)
1095 spin_unlock_irqrestore(&d40c->lock, flags);
1098 static irqreturn_t d40_handle_interrupt(int irq, void *data)
1100 static const struct d40_interrupt_lookup il[] = {
1101 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
1102 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
1103 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
1104 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
1105 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
1106 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
1107 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
1108 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
1109 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
1110 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
1114 u32 regs[ARRAY_SIZE(il)];
1118 struct d40_chan *d40c;
1119 unsigned long flags;
1120 struct d40_base *base = data;
1122 spin_lock_irqsave(&base->interrupt_lock, flags);
1124 /* Read interrupt status of both logical and physical channels */
1125 for (i = 0; i < ARRAY_SIZE(il); i++)
1126 regs[i] = readl(base->virtbase + il[i].src);
1130 chan = find_next_bit((unsigned long *)regs,
1131 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
1133 /* No more set bits found? */
1134 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
1137 row = chan / BITS_PER_LONG;
1138 idx = chan & (BITS_PER_LONG - 1);
1141 writel(1 << idx, base->virtbase + il[row].clr);
1143 if (il[row].offset == D40_PHY_CHAN)
1144 d40c = base->lookup_phy_chans[idx];
1146 d40c = base->lookup_log_chans[il[row].offset + idx];
1147 spin_lock(&d40c->lock);
1149 if (!il[row].is_error)
1150 dma_tc_handle(d40c);
1152 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1153 chan, il[row].offset, idx);
1155 spin_unlock(&d40c->lock);
1158 spin_unlock_irqrestore(&base->interrupt_lock, flags);
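/*
 * Worked example (illustrative, assuming a 32-bit platform where each il[]
 * entry covers one 32-bit status register): a terminal-count interrupt for
 * logical channel 70 sets bit 70 of the combined bitmap, giving row == 2
 * (D40_DREG_LCTIS2), idx == 6 and d40c == lookup_log_chans[64 + 6].
 */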
1163 static int d40_validate_conf(struct d40_chan *d40c,
1164 struct stedma40_chan_cfg *conf)
1167 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
1168 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
1169 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1172 chan_err(d40c, "Invalid direction.\n");
1176 if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
1177 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
1178 d40c->runtime_addr == 0) {
1180 chan_err(d40c, "Invalid TX channel address (%d)\n",
1181 conf->dst_dev_type);
1185 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
1186 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
1187 d40c->runtime_addr == 0) {
1188 chan_err(d40c, "Invalid RX channel address (%d)\n",
1189 conf->src_dev_type);
1193 if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
1194 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1195 chan_err(d40c, "Invalid dst\n");
1199 if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
1200 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1201 chan_err(d40c, "Invalid src\n");
1205 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1206 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1207 chan_err(d40c, "No event line\n");
1211 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1212 (src_event_group != dst_event_group)) {
1213 chan_err(d40c, "Invalid event group\n");
1217 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1219 * DMAC HW supports it. Will be added to this driver,
1220 * in case any dma client requires it.
1222 chan_err(d40c, "periph to periph not supported\n");
1226 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1227 (1 << conf->src_info.data_width) !=
1228 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1229 (1 << conf->dst_info.data_width)) {
1231 * The DMAC hardware only supports
1232 * src (burst x width) == dst (burst x width)
1235 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1242 static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
1243 int log_event_line, bool is_log)
1245 unsigned long flags;
1246 spin_lock_irqsave(&phy->lock, flags);
1248 /* Physical interrupts are masked per physical full channel */
1249 if (phy->allocated_src == D40_ALLOC_FREE &&
1250 phy->allocated_dst == D40_ALLOC_FREE) {
1251 phy->allocated_dst = D40_ALLOC_PHY;
1252 phy->allocated_src = D40_ALLOC_PHY;
1258 /* Logical channel */
1260 if (phy->allocated_src == D40_ALLOC_PHY)
1263 if (phy->allocated_src == D40_ALLOC_FREE)
1264 phy->allocated_src = D40_ALLOC_LOG_FREE;
1266 if (!(phy->allocated_src & (1 << log_event_line))) {
1267 phy->allocated_src |= 1 << log_event_line;
1272 if (phy->allocated_dst == D40_ALLOC_PHY)
1275 if (phy->allocated_dst == D40_ALLOC_FREE)
1276 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1278 if (!(phy->allocated_dst & (1 << log_event_line))) {
1279 phy->allocated_dst |= 1 << log_event_line;
1286 spin_unlock_irqrestore(&phy->lock, flags);
1289 spin_unlock_irqrestore(&phy->lock, flags);
1293 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1296 unsigned long flags;
1297 bool is_free = false;
1299 spin_lock_irqsave(&phy->lock, flags);
1300 if (!log_event_line) {
1301 phy->allocated_dst = D40_ALLOC_FREE;
1302 phy->allocated_src = D40_ALLOC_FREE;
1307 /* Logical channel */
1309 phy->allocated_src &= ~(1 << log_event_line);
1310 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1311 phy->allocated_src = D40_ALLOC_FREE;
1313 phy->allocated_dst &= ~(1 << log_event_line);
1314 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1315 phy->allocated_dst = D40_ALLOC_FREE;
1318 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1322 spin_unlock_irqrestore(&phy->lock, flags);
1327 static int d40_allocate_channel(struct d40_chan *d40c)
1332 struct d40_phy_res *phys;
1337 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1339 phys = d40c->base->phy_res;
1341 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1342 dev_type = d40c->dma_cfg.src_dev_type;
1343 log_num = 2 * dev_type;
1345 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1346 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1347 /* dst event lines are used for logical memcpy */
1348 dev_type = d40c->dma_cfg.dst_dev_type;
1349 log_num = 2 * dev_type + 1;
1354 event_group = D40_TYPE_TO_GROUP(dev_type);
1355 event_line = D40_TYPE_TO_EVENT(dev_type);
1358 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1359 /* Find physical half channel */
1360 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1362 if (d40_alloc_mask_set(&phys[i], is_src,
1367 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1368 int phy_num = j + event_group * 2;
1369 for (i = phy_num; i < phy_num + 2; i++) {
1370 if (d40_alloc_mask_set(&phys[i],
1379 d40c->phy_chan = &phys[i];
1380 d40c->log_num = D40_PHY_CHAN;
1386 /* Find logical channel */
1387 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1388 int phy_num = j + event_group * 2;
1390 * Spread logical channels across all available physical channels rather
1391 * than packing every logical channel onto the first available phy
1395 for (i = phy_num; i < phy_num + 2; i++) {
1396 if (d40_alloc_mask_set(&phys[i], is_src,
1397 event_line, is_log))
1401 for (i = phy_num + 1; i >= phy_num; i--) {
1402 if (d40_alloc_mask_set(&phys[i], is_src,
1403 event_line, is_log))
1411 d40c->phy_chan = &phys[i];
1412 d40c->log_num = log_num;
1416 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1418 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
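/*
 * Illustrative note derived from the allocation loops above (not from the
 * original source): a logical channel in event group 1 is tried on physical
 * channel pairs {2,3}, {10,11}, {18,19}, ... so that logical channels are
 * spread over the physical channels instead of piling up on the first pair.
 */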
1424 static int d40_config_memcpy(struct d40_chan *d40c)
1426 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1428 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1429 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1430 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1431 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1432 memcpy[d40c->chan.chan_id];
1434 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1435 dma_has_cap(DMA_SLAVE, cap)) {
1436 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1438 chan_err(d40c, "No memcpy\n");
1446 static int d40_free_dma(struct d40_chan *d40c)
1451 struct d40_phy_res *phy = d40c->phy_chan;
1454 struct d40_desc *_d;
1457 /* Terminate all queued and active transfers */
1460 /* Release client owned descriptors */
1461 if (!list_empty(&d40c->client))
1462 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1463 d40_pool_lli_free(d);
1465 d40_desc_free(d40c, d);
1469 chan_err(d40c, "phy == null\n");
1473 if (phy->allocated_src == D40_ALLOC_FREE &&
1474 phy->allocated_dst == D40_ALLOC_FREE) {
1475 chan_err(d40c, "channel already free\n");
1479 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1480 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1481 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1483 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1484 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1487 chan_err(d40c, "Unknown direction\n");
1491 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1493 chan_err(d40c, "suspend failed\n");
1497 if (chan_is_logical(d40c)) {
1498 /* Release logical channel, deactivate the event line */
1500 d40_config_set_event(d40c, false);
1501 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1504 * Check if there are more logical allocations
1505 * on this phy channel.
1507 if (!d40_alloc_mask_free(phy, is_src, event)) {
1508 /* Resume the other logical channels if any */
1509 if (d40_chan_has_events(d40c)) {
1510 res = d40_channel_execute_command(d40c,
1514 "Executing RUN command\n");
1521 (void) d40_alloc_mask_free(phy, is_src, 0);
1524 /* Release physical channel */
1525 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1527 chan_err(d40c, "Failed to stop channel\n");
1530 d40c->phy_chan = NULL;
1531 d40c->configured = false;
1532 d40c->base->lookup_phy_chans[phy->num] = NULL;
1537 static bool d40_is_paused(struct d40_chan *d40c)
1539 void __iomem *chanbase = chan_base(d40c);
1540 bool is_paused = false;
1541 unsigned long flags;
1542 void __iomem *active_reg;
1546 spin_lock_irqsave(&d40c->lock, flags);
1548 if (chan_is_physical(d40c)) {
1549 if (d40c->phy_chan->num % 2 == 0)
1550 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1552 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1554 status = (readl(active_reg) &
1555 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1556 D40_CHAN_POS(d40c->phy_chan->num);
1557 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1563 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1564 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1565 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1566 status = readl(chanbase + D40_CHAN_REG_SDLNK);
1567 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1568 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1569 status = readl(chanbase + D40_CHAN_REG_SSLNK);
1571 chan_err(d40c, "Unknown direction\n");
1575 status = (status & D40_EVENTLINE_MASK(event)) >>
1576 D40_EVENTLINE_POS(event);
1578 if (status != D40_DMA_RUN)
1581 spin_unlock_irqrestore(&d40c->lock, flags);
1587 static u32 stedma40_residue(struct dma_chan *chan)
1589 struct d40_chan *d40c =
1590 container_of(chan, struct d40_chan, chan);
1592 unsigned long flags;
1594 spin_lock_irqsave(&d40c->lock, flags);
1595 bytes_left = d40_residue(d40c);
1596 spin_unlock_irqrestore(&d40c->lock, flags);
1601 struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1602 struct scatterlist *sgl_dst,
1603 struct scatterlist *sgl_src,
1604 unsigned int sgl_len,
1605 unsigned long dma_flags)
1608 struct d40_desc *d40d;
1609 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1611 unsigned long flags;
1613 if (d40c->phy_chan == NULL) {
1614 chan_err(d40c, "Unallocated channel.\n");
1615 return ERR_PTR(-EINVAL);
1618 spin_lock_irqsave(&d40c->lock, flags);
1619 d40d = d40_desc_get(d40c);
1624 d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
1625 d40c->dma_cfg.src_info.data_width,
1626 d40c->dma_cfg.dst_info.data_width);
1627 if (d40d->lli_len < 0) {
1628 chan_err(d40c, "Unaligned size\n");
1632 d40d->lli_current = 0;
1633 d40d->txd.flags = dma_flags;
1635 if (chan_is_logical(d40c)) {
1637 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
1638 chan_err(d40c, "Out of memory\n");
1642 (void) d40_log_sg_to_lli(sgl_src,
1645 d40c->log_def.lcsp1,
1646 d40c->dma_cfg.src_info.data_width,
1647 d40c->dma_cfg.dst_info.data_width);
1649 (void) d40_log_sg_to_lli(sgl_dst,
1652 d40c->log_def.lcsp3,
1653 d40c->dma_cfg.dst_info.data_width,
1654 d40c->dma_cfg.src_info.data_width);
1656 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
1657 chan_err(d40c, "Out of memory\n");
1661 res = d40_phy_sg_to_lli(sgl_src,
1665 virt_to_phys(d40d->lli_phy.src),
1667 d40c->dma_cfg.src_info.data_width,
1668 d40c->dma_cfg.dst_info.data_width,
1669 d40c->dma_cfg.src_info.psize);
1674 res = d40_phy_sg_to_lli(sgl_dst,
1678 virt_to_phys(d40d->lli_phy.dst),
1680 d40c->dma_cfg.dst_info.data_width,
1681 d40c->dma_cfg.src_info.data_width,
1682 d40c->dma_cfg.dst_info.psize);
1687 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1688 d40d->lli_pool.size, DMA_TO_DEVICE);
1691 dma_async_tx_descriptor_init(&d40d->txd, chan);
1693 d40d->txd.tx_submit = d40_tx_submit;
1695 spin_unlock_irqrestore(&d40c->lock, flags);
1700 d40_desc_free(d40c, d40d);
1701 spin_unlock_irqrestore(&d40c->lock, flags);
1704 EXPORT_SYMBOL(stedma40_memcpy_sg);
1706 bool stedma40_filter(struct dma_chan *chan, void *data)
1708 struct stedma40_chan_cfg *info = data;
1709 struct d40_chan *d40c =
1710 container_of(chan, struct d40_chan, chan);
1714 err = d40_validate_conf(d40c, info);
1716 d40c->dma_cfg = *info;
1718 err = d40_config_memcpy(d40c);
1721 d40c->configured = true;
1725 EXPORT_SYMBOL(stedma40_filter);
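/*
 * Example use from a client driver (a minimal sketch; the source device type
 * below is a placeholder and depends on the platform data):
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.dir = STEDMA40_PERIPH_TO_MEM,
 *		.src_dev_type = <platform specific device type>,
 *		.dst_dev_type = STEDMA40_DEV_DST_MEMORY,
 *		.mode = STEDMA40_MODE_LOGICAL,
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */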
1727 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
1729 bool realtime = d40c->dma_cfg.realtime;
1730 bool highprio = d40c->dma_cfg.high_priority;
1731 u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
1732 u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
1733 u32 event = D40_TYPE_TO_EVENT(dev_type);
1734 u32 group = D40_TYPE_TO_GROUP(dev_type);
1735 u32 bit = 1 << event;
1737 /* Destination event lines are stored in the upper halfword */
1741 writel(bit, d40c->base->virtbase + prioreg + group * 4);
1742 writel(bit, d40c->base->virtbase + rtreg + group * 4);
1745 static void d40_set_prio_realtime(struct d40_chan *d40c)
1747 if (d40c->base->rev < 3)
1750 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
1751 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1752 __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
1754 if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
1755 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1756 __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
1759 /* DMA ENGINE functions */
1760 static int d40_alloc_chan_resources(struct dma_chan *chan)
1763 unsigned long flags;
1764 struct d40_chan *d40c =
1765 container_of(chan, struct d40_chan, chan);
1767 spin_lock_irqsave(&d40c->lock, flags);
1769 d40c->completed = chan->cookie = 1;
1771 /* If no dma configuration is set, use the default configuration (memcpy) */
1772 if (!d40c->configured) {
1773 err = d40_config_memcpy(d40c);
1775 chan_err(d40c, "Failed to configure memcpy channel\n");
1779 is_free_phy = (d40c->phy_chan == NULL);
1781 err = d40_allocate_channel(d40c);
1783 chan_err(d40c, "Failed to allocate channel\n");
1787 /* Fill in basic CFG register values */
1788 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1789 &d40c->dst_def_cfg, chan_is_logical(d40c));
1791 d40_set_prio_realtime(d40c);
1793 if (chan_is_logical(d40c)) {
1794 d40_log_cfg(&d40c->dma_cfg,
1795 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1797 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1798 d40c->lcpa = d40c->base->lcpa_base +
1799 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1801 d40c->lcpa = d40c->base->lcpa_base +
1802 d40c->dma_cfg.dst_dev_type *
1803 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1807 * Only write channel configuration to the DMA if the physical
1808 * resource is free. In case of multiple logical channels
1809 * on the same physical resource, only the first write is necessary.
1812 d40_config_write(d40c);
1814 spin_unlock_irqrestore(&d40c->lock, flags);
1818 static void d40_free_chan_resources(struct dma_chan *chan)
1820 struct d40_chan *d40c =
1821 container_of(chan, struct d40_chan, chan);
1823 unsigned long flags;
1825 if (d40c->phy_chan == NULL) {
1826 chan_err(d40c, "Cannot free unallocated channel\n");
1831 spin_lock_irqsave(&d40c->lock, flags);
1833 err = d40_free_dma(d40c);
1836 chan_err(d40c, "Failed to free channel\n");
1837 spin_unlock_irqrestore(&d40c->lock, flags);
1840 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1844 unsigned long dma_flags)
1846 struct d40_desc *d40d;
1847 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1849 unsigned long flags;
1851 if (d40c->phy_chan == NULL) {
1852 chan_err(d40c, "Channel is not allocated.\n");
1853 return ERR_PTR(-EINVAL);
1856 spin_lock_irqsave(&d40c->lock, flags);
1857 d40d = d40_desc_get(d40c);
1860 chan_err(d40c, "Descriptor is NULL\n");
1864 d40d->txd.flags = dma_flags;
1865 d40d->lli_len = d40_size_2_dmalen(size,
1866 d40c->dma_cfg.src_info.data_width,
1867 d40c->dma_cfg.dst_info.data_width);
1868 if (d40d->lli_len < 0) {
1869 chan_err(d40c, "Unaligned size\n");
1874 dma_async_tx_descriptor_init(&d40d->txd, chan);
1876 d40d->txd.tx_submit = d40_tx_submit;
1878 if (chan_is_logical(d40c)) {
1880 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
1881 chan_err(d40c, "Out of memory\n");
1884 d40d->lli_current = 0;
1886 if (d40_log_buf_to_lli(d40d->lli_log.src,
1889 d40c->log_def.lcsp1,
1890 d40c->dma_cfg.src_info.data_width,
1891 d40c->dma_cfg.dst_info.data_width,
1895 if (d40_log_buf_to_lli(d40d->lli_log.dst,
1898 d40c->log_def.lcsp3,
1899 d40c->dma_cfg.dst_info.data_width,
1900 d40c->dma_cfg.src_info.data_width,
1906 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
1907 chan_err(d40c, "Out of memory\n");
1911 if (d40_phy_buf_to_lli(d40d->lli_phy.src,
1914 d40c->dma_cfg.src_info.psize,
1918 d40c->dma_cfg.src_info.data_width,
1919 d40c->dma_cfg.dst_info.data_width,
1923 if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
1926 d40c->dma_cfg.dst_info.psize,
1930 d40c->dma_cfg.dst_info.data_width,
1931 d40c->dma_cfg.src_info.data_width,
1935 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1936 d40d->lli_pool.size, DMA_TO_DEVICE);
1939 spin_unlock_irqrestore(&d40c->lock, flags);
1944 d40_desc_free(d40c, d40d);
1945 spin_unlock_irqrestore(&d40c->lock, flags);
1949 static struct dma_async_tx_descriptor *
1950 d40_prep_sg(struct dma_chan *chan,
1951 struct scatterlist *dst_sg, unsigned int dst_nents,
1952 struct scatterlist *src_sg, unsigned int src_nents,
1953 unsigned long dma_flags)
1955 if (dst_nents != src_nents)
1958 return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
1961 static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1962 struct d40_chan *d40c,
1963 struct scatterlist *sgl,
1964 unsigned int sg_len,
1965 enum dma_data_direction direction,
1966 unsigned long dma_flags)
1968 dma_addr_t dev_addr = 0;
1971 d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
1972 d40c->dma_cfg.src_info.data_width,
1973 d40c->dma_cfg.dst_info.data_width);
1974 if (d40d->lli_len < 0) {
1975 chan_err(d40c, "Unaligned size\n");
1979 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
1980 chan_err(d40c, "Out of memory\n");
1984 d40d->lli_current = 0;
1986 if (direction == DMA_FROM_DEVICE)
1987 if (d40c->runtime_addr)
1988 dev_addr = d40c->runtime_addr;
1990 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1991 else if (direction == DMA_TO_DEVICE)
1992 if (d40c->runtime_addr)
1993 dev_addr = d40c->runtime_addr;
1995 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
2000 total_size = d40_log_sg_to_dev(sgl, sg_len,
2003 d40c->dma_cfg.src_info.data_width,
2004 d40c->dma_cfg.dst_info.data_width,
2014 static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
2015 struct d40_chan *d40c,
2016 struct scatterlist *sgl,
2017 unsigned int sgl_len,
2018 enum dma_data_direction direction,
2019 unsigned long dma_flags)
2021 dma_addr_t src_dev_addr;
2022 dma_addr_t dst_dev_addr;
2025 d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
2026 d40c->dma_cfg.src_info.data_width,
2027 d40c->dma_cfg.dst_info.data_width);
2028 if (d40d->lli_len < 0) {
2029 chan_err(d40c, "Unaligned size\n");
2033 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
2034 chan_err(d40c, "Out of memory\n");
2038 d40d->lli_current = 0;
2040 if (direction == DMA_FROM_DEVICE) {
2042 if (d40c->runtime_addr)
2043 src_dev_addr = d40c->runtime_addr;
2045 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
2046 } else if (direction == DMA_TO_DEVICE) {
2047 if (d40c->runtime_addr)
2048 dst_dev_addr = d40c->runtime_addr;
2050 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
2055 res = d40_phy_sg_to_lli(sgl,
2059 virt_to_phys(d40d->lli_phy.src),
2061 d40c->dma_cfg.src_info.data_width,
2062 d40c->dma_cfg.dst_info.data_width,
2063 d40c->dma_cfg.src_info.psize);
2067 res = d40_phy_sg_to_lli(sgl,
2071 virt_to_phys(d40d->lli_phy.dst),
2073 d40c->dma_cfg.dst_info.data_width,
2074 d40c->dma_cfg.src_info.data_width,
2075 d40c->dma_cfg.dst_info.psize);
2079 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
2080 d40d->lli_pool.size, DMA_TO_DEVICE);
2084 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2085 struct scatterlist *sgl,
2086 unsigned int sg_len,
2087 enum dma_data_direction direction,
2088 unsigned long dma_flags)
2090 struct d40_desc *d40d;
2091 struct d40_chan *d40c = container_of(chan, struct d40_chan,
2093 unsigned long flags;
2096 if (d40c->phy_chan == NULL) {
2097 chan_err(d40c, "Cannot prepare unallocated channel\n");
2098 return ERR_PTR(-EINVAL);
2101 spin_lock_irqsave(&d40c->lock, flags);
2102 d40d = d40_desc_get(d40c);
2107 if (chan_is_logical(d40c))
2108 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
2109 direction, dma_flags);
2111 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
2112 direction, dma_flags);
2114 chan_err(d40c, "Failed to prepare %s slave sg job: %d\n",
2115 chan_is_logical(d40c) ? "log" : "phy", err);
2119 d40d->txd.flags = dma_flags;
2121 dma_async_tx_descriptor_init(&d40d->txd, chan);
2123 d40d->txd.tx_submit = d40_tx_submit;
2125 spin_unlock_irqrestore(&d40c->lock, flags);
2130 d40_desc_free(d40c, d40d);
2131 spin_unlock_irqrestore(&d40c->lock, flags);
2135 static enum dma_status d40_tx_status(struct dma_chan *chan,
2136 dma_cookie_t cookie,
2137 struct dma_tx_state *txstate)
2139 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2140 dma_cookie_t last_used;
2141 dma_cookie_t last_complete;
2144 if (d40c->phy_chan == NULL) {
2145 chan_err(d40c, "Cannot read status of unallocated channel\n");
2149 last_complete = d40c->completed;
2150 last_used = chan->cookie;
2152 if (d40_is_paused(d40c))
2155 ret = dma_async_is_complete(cookie, last_complete, last_used);
2157 dma_set_tx_state(txstate, last_complete, last_used,
2158 stedma40_residue(chan));
2163 static void d40_issue_pending(struct dma_chan *chan)
2165 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2166 unsigned long flags;
2168 if (d40c->phy_chan == NULL) {
2169 chan_err(d40c, "Channel is not allocated!\n");
2173 spin_lock_irqsave(&d40c->lock, flags);
2175 /* Busy means that pending jobs are already being processed */
2177 (void) d40_queue_start(d40c);
2179 spin_unlock_irqrestore(&d40c->lock, flags);
2182 /* Runtime reconfiguration extension */
2183 static void d40_set_runtime_config(struct dma_chan *chan,
2184 struct dma_slave_config *config)
2186 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2187 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2188 enum dma_slave_buswidth config_addr_width;
2189 dma_addr_t config_addr;
2190 u32 config_maxburst;
2191 enum stedma40_periph_data_width addr_width;
2194 if (config->direction == DMA_FROM_DEVICE) {
2195 dma_addr_t dev_addr_rx =
2196 d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2198 config_addr = config->src_addr;
2200 dev_dbg(d40c->base->dev,
2201 "channel has a pre-wired RX address %08x "
2202 "overriding with %08x\n",
2203 dev_addr_rx, config_addr);
2204 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2205 dev_dbg(d40c->base->dev,
2206 "channel was not configured for peripheral "
2207 "to memory transfer (%d) overriding\n",
2209 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2211 config_addr_width = config->src_addr_width;
2212 config_maxburst = config->src_maxburst;
2214 } else if (config->direction == DMA_TO_DEVICE) {
2215 dma_addr_t dev_addr_tx =
2216 d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2218 config_addr = config->dst_addr;
2220 dev_dbg(d40c->base->dev,
2221 "channel has a pre-wired TX address %08x "
2222 "overriding with %08x\n",
2223 dev_addr_tx, config_addr);
2224 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2225 dev_dbg(d40c->base->dev,
2226 "channel was not configured for memory "
2227 "to peripheral transfer (%d) overriding\n",
2229 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2231 config_addr_width = config->dst_addr_width;
2232 config_maxburst = config->dst_maxburst;
2235 dev_err(d40c->base->dev,
2236 "unrecognized channel direction %d\n",
2241 switch (config_addr_width) {
2242 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2243 addr_width = STEDMA40_BYTE_WIDTH;
2245 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2246 addr_width = STEDMA40_HALFWORD_WIDTH;
2248 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2249 addr_width = STEDMA40_WORD_WIDTH;
2251 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2252 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2255 dev_err(d40c->base->dev,
2256 "illegal peripheral address width "
2258 config->src_addr_width);
2262 if (chan_is_logical(d40c)) {
2263 if (config_maxburst >= 16)
2264 psize = STEDMA40_PSIZE_LOG_16;
2265 else if (config_maxburst >= 8)
2266 psize = STEDMA40_PSIZE_LOG_8;
2267 else if (config_maxburst >= 4)
2268 psize = STEDMA40_PSIZE_LOG_4;
2270 psize = STEDMA40_PSIZE_LOG_1;
2272 if (config_maxburst >= 16)
2273 psize = STEDMA40_PSIZE_PHY_16;
2274 else if (config_maxburst >= 8)
2275 psize = STEDMA40_PSIZE_PHY_8;
2276 else if (config_maxburst >= 4)
2277 psize = STEDMA40_PSIZE_PHY_4;
2278 else if (config_maxburst >= 2)
2279 psize = STEDMA40_PSIZE_PHY_2;
2281 psize = STEDMA40_PSIZE_PHY_1;
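	/*
	 * Example (illustrative): a logical channel asking for maxburst 12
	 * falls through to STEDMA40_PSIZE_LOG_8, i.e. the largest supported
	 * burst size that does not exceed the requested one.
	 */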
2284 /* Set up all the endpoint configs */
2285 cfg->src_info.data_width = addr_width;
2286 cfg->src_info.psize = psize;
2287 cfg->src_info.big_endian = false;
2288 cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2289 cfg->dst_info.data_width = addr_width;
2290 cfg->dst_info.psize = psize;
2291 cfg->dst_info.big_endian = false;
2292 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2294 /* Fill in register values */
2295 if (chan_is_logical(d40c))
2296 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2298 d40_phy_cfg(cfg, &d40c->src_def_cfg,
2299 &d40c->dst_def_cfg, false);
2301 /* These settings will take precedence later */
2302 d40c->runtime_addr = config_addr;
2303 d40c->runtime_direction = config->direction;
2304 dev_dbg(d40c->base->dev,
2305 "configured channel %s for %s, data width %d, "
2306 "maxburst %d bytes, LE, no flow control\n",
2307 dma_chan_name(chan),
2308 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
2313 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2316 unsigned long flags;
2317 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2319 if (d40c->phy_chan == NULL) {
2320 chan_err(d40c, "Channel is not allocated!\n");
2325 case DMA_TERMINATE_ALL:
2326 spin_lock_irqsave(&d40c->lock, flags);
2328 spin_unlock_irqrestore(&d40c->lock, flags);
2331 return d40_pause(chan);
2333 return d40_resume(chan);
2334 case DMA_SLAVE_CONFIG:
2335 d40_set_runtime_config(chan,
2336 (struct dma_slave_config *) arg);
2342 /* Other commands are unimplemented */
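/*
 * Example of how a client reaches d40_set_runtime_config() (a sketch; the
 * FIFO address and widths are placeholders):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_FROM_DEVICE,
 *		.src_addr = <peripheral FIFO address>,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long) &cfg);
 */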
2346 /* Initialization functions */
2348 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2349 struct d40_chan *chans, int offset,
2353 struct d40_chan *d40c;
2355 INIT_LIST_HEAD(&dma->channels);
2357 for (i = offset; i < offset + num_chans; i++) {
2360 d40c->chan.device = dma;
2362 spin_lock_init(&d40c->lock);
2364 d40c->log_num = D40_PHY_CHAN;
2366 INIT_LIST_HEAD(&d40c->active);
2367 INIT_LIST_HEAD(&d40c->queue);
2368 INIT_LIST_HEAD(&d40c->client);
2370 tasklet_init(&d40c->tasklet, dma_tasklet,
2371 (unsigned long) d40c);
2373 list_add_tail(&d40c->chan.device_node,
2378 static int __init d40_dmaengine_init(struct d40_base *base,
2379 int num_reserved_chans)
2383 d40_chan_init(base, &base->dma_slave, base->log_chans,
2384 0, base->num_log_chans);
2386 dma_cap_zero(base->dma_slave.cap_mask);
2387 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2389 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2390 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2391 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2392 base->dma_slave.device_prep_dma_sg = d40_prep_sg;
2393 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2394 base->dma_slave.device_tx_status = d40_tx_status;
2395 base->dma_slave.device_issue_pending = d40_issue_pending;
2396 base->dma_slave.device_control = d40_control;
2397 base->dma_slave.dev = base->dev;
2399 err = dma_async_device_register(&base->dma_slave);
2402 d40_err(base->dev, "Failed to register slave channels\n");
2406 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2407 base->num_log_chans, base->plat_data->memcpy_len);
2409 dma_cap_zero(base->dma_memcpy.cap_mask);
2410 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2411 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2413 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2414 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2415 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2416 base->dma_memcpy.device_prep_dma_sg = d40_prep_sg;
2417 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2418 base->dma_memcpy.device_tx_status = d40_tx_status;
2419 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2420 base->dma_memcpy.device_control = d40_control;
2421 base->dma_memcpy.dev = base->dev;
2423 * This controller can only access addresses at even
2424 * 32-bit boundaries, i.e. 2^2
2426 base->dma_memcpy.copy_align = 2;
2428 err = dma_async_device_register(&base->dma_memcpy);
2432 "Failed to regsiter memcpy only channels\n");
2436 d40_chan_init(base, &base->dma_both, base->phy_chans,
2437 0, num_reserved_chans);
2439 dma_cap_zero(base->dma_both.cap_mask);
2440 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2441 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2442 dma_cap_set(DMA_SG, base->dma_both.cap_mask);
2444 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2445 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2446 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2447 base->dma_both.device_prep_dma_sg = d40_prep_sg;
2448 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2449 base->dma_both.device_tx_status = d40_tx_status;
2450 base->dma_both.device_issue_pending = d40_issue_pending;
2451 base->dma_both.device_control = d40_control;
2452 base->dma_both.dev = base->dev;
2453 base->dma_both.copy_align = 2;
2454 err = dma_async_device_register(&base->dma_both);
2458 "Failed to register logical and physical capable channels\n");
2463 dma_async_device_unregister(&base->dma_memcpy);
2465 dma_async_device_unregister(&base->dma_slave);
2470 /* Initialization functions. */
2472 static int __init d40_phy_res_init(struct d40_base *base)
2475 int num_phy_chans_avail = 0;
2477 int odd_even_bit = -2;
2479 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2480 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2482 for (i = 0; i < base->num_phy_chans; i++) {
2483 base->phy_res[i].num = i;
2484 odd_even_bit += 2 * ((i % 2) == 0);
2485 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2486 /* Mark security only channels as occupied */
2487 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2488 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2490 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2491 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2492 num_phy_chans_avail++;
2494 spin_lock_init(&base->phy_res[i].lock);
2497 /* Mark disabled channels as occupied */
2498 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2499 int chan = base->plat_data->disabled_channels[i];
2501 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
2502 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
2503 num_phy_chans_avail--;
2506 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2507 num_phy_chans_avail, base->num_phy_chans);
2509 /* Verify extended vs standard channel mode settings */
2510 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2512 for (i = 0; i < base->num_phy_chans; i++) {
2514 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2515 (val[0] & 0x3) != 1)
2517 "[%s] INFO: channel %d is misconfigured (%d)\n",
2518 __func__, i, val[0] & 0x3);
2520 val[0] = val[0] >> 2;
2523 return num_phy_chans_avail;
2526 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2528 static const struct d40_reg_val dma_id_regs[] = {
2530 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2531 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2533 * D40_DREG_PERIPHID2 depends on HW revision:
2534 * DB8500ed has 0x0008,
2536 * DB8500v1 has 0x0028,
2537 * DB8500v2 has 0x0038
2539 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2542 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2543 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2544 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2545 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2547 struct stedma40_platform_data *plat_data;
2548 struct clk *clk = NULL;
2549 void __iomem *virtbase = NULL;
2550 struct resource *res = NULL;
2551 struct d40_base *base = NULL;
2552 int num_log_chans = 0;
2558 clk = clk_get(&pdev->dev, NULL);
2561 d40_err(&pdev->dev, "No matching clock found\n");
2567 /* Get IO for DMAC base address */
2568 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2572 if (request_mem_region(res->start, resource_size(res),
2573 D40_NAME " I/O base") == NULL)
2576 virtbase = ioremap(res->start, resource_size(res));
2580 /* HW version check */
2581 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2582 if (dma_id_regs[i].val !=
2583 readl(virtbase + dma_id_regs[i].reg)) {
2585 "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2588 readl(virtbase + dma_id_regs[i].reg));
2593 /* Get silicon revision and designer */
2594 val = readl(virtbase + D40_DREG_PERIPHID2);
2596 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
2598 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
2599 val & D40_DREG_PERIPHID2_DESIGNER_MASK,
2604 rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
2605 D40_DREG_PERIPHID2_REV_POS;
2607 /* The number of physical channels on this HW */
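/* ICFG[2:0] encodes the count in steps of four, i.e. 4..32 channels. */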
2608 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2610 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2613 plat_data = pdev->dev.platform_data;
2615 /* Count the number of logical channels in use */
2616 for (i = 0; i < plat_data->dev_len; i++)
2617 if (plat_data->dev_rx[i] != 0)
2620 for (i = 0; i < plat_data->dev_len; i++)
2621 if (plat_data->dev_tx[i] != 0)
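/*
 * One allocation holds the d40_base struct followed by a d40_chan for
 * every physical channel, every logical channel in use and every
 * memcpy channel; phy_chans and log_chans below point into this area.
 */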
2624 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2625 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2626 sizeof(struct d40_chan), GFP_KERNEL);
2629 d40_err(&pdev->dev, "Out of memory\n");
2635 base->num_phy_chans = num_phy_chans;
2636 base->num_log_chans = num_log_chans;
2637 base->phy_start = res->start;
2638 base->phy_size = resource_size(res);
2639 base->virtbase = virtbase;
2640 base->plat_data = plat_data;
2641 base->dev = &pdev->dev;
2642 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2643 base->log_chans = &base->phy_chans[num_phy_chans];
2645 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2650 base->lookup_phy_chans = kzalloc(num_phy_chans *
2651 sizeof(struct d40_chan *),
2653 if (!base->lookup_phy_chans)
2656 if (num_log_chans + plat_data->memcpy_len) {
2658 * The maximum number of logical channels is the number of event
2659 * lines for all src and dst devices.
2661 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2662 sizeof(struct d40_chan *),
2664 if (!base->lookup_log_chans)
2668 base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
2669 sizeof(struct d40_desc *) *
2670 D40_LCLA_LINK_PER_EVENT_GRP,
2672 if (!base->lcla_pool.alloc_map)
2675 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2676 0, SLAB_HWCACHE_ALIGN,
2678 if (base->desc_slab == NULL)
2691 release_mem_region(res->start,
2692 resource_size(res));
2697 kfree(base->lcla_pool.alloc_map);
2698 kfree(base->lookup_log_chans);
2699 kfree(base->lookup_phy_chans);
2700 kfree(base->phy_res);
2707 static void __init d40_hw_init(struct d40_base *base)
2710 static const struct d40_reg_val dma_init_reg[] = {
2711 /* Clock every part of the DMA block from start */
2712 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2714 /* Interrupts on all logical channels */
2715 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2716 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2717 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2718 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2719 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2720 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2721 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2722 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2723 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2724 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2725 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2726 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2729 u32 prmseo[2] = {0, 0};
2730 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2734 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2735 writel(dma_init_reg[i].val,
2736 base->virtbase + dma_init_reg[i].reg);
2738 /* Configure all our dma channels to default settings */
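/*
 * Two bits per channel are packed into the PRMSE/PRMSO (mode) and
 * ACTIVE/ACTIVO (activation) registers, even channels in the *E
 * registers and odd channels in the *O ones. The loop walks from the
 * highest channel downwards so that each shift left by two places the
 * previously handled channels in their final positions.
 */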
2739 for (i = 0; i < base->num_phy_chans; i++) {
2741 activeo[i % 2] = activeo[i % 2] << 2;
2743 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2745 activeo[i % 2] |= 3;
2749 /* Enable the interrupt for this channel */
2750 pcmis = (pcmis << 1) | 1;
2752 /* Clear the interrupt for this channel */
2753 pcicr = (pcicr << 1) | 1;
2755 /* Set channel to physical mode */
2756 prmseo[i % 2] = prmseo[i % 2] << 2;
2761 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2762 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2763 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2764 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2766 /* Write which interrupt to enable */
2767 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2769 /* Write which interrupt to clear */
2770 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2774 static int __init d40_lcla_allocate(struct d40_base *base)
2776 unsigned long *page_list;
2781 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
2782 * To fulfill this hardware requirement without wasting 256 KB,
2783 * we allocate pages until we get an aligned one.
2785 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
2793 /* Calculate how many pages are required */
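/*
 * Each physical channel needs 1 KB of LCLA space, i.e.
 * D40_LCLA_LINK_PER_EVENT_GRP links of 8 bytes each.
 */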
2794 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
2796 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
2797 page_list[i] = __get_free_pages(GFP_KERNEL,
2798 base->lcla_pool.pages);
2799 if (!page_list[i]) {
2801 d40_err(base->dev, "Failed to allocate %d pages.\n",
2802 base->lcla_pool.pages);
2804 for (j = 0; j < i; j++)
2805 free_pages(page_list[j], base->lcla_pool.pages);
2809 if ((virt_to_phys((void *)page_list[i]) &
2810 (LCLA_ALIGNMENT - 1)) == 0)
2814 for (j = 0; j < i; j++)
2815 free_pages(page_list[j], base->lcla_pool.pages);
2817 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
2818 base->lcla_pool.base = (void *)page_list[i];
2821 * After many attempts with no success finding the correct
2822 * alignment, fall back to allocating a big buffer.
2825 "[%s] Failed to get %d pages @ 18 bit align.\n",
2826 __func__, base->lcla_pool.pages);
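/*
 * Fallback: over-allocate by the required alignment so that PTR_ALIGN()
 * below is guaranteed to find a properly aligned block inside the
 * buffer; base_unaligned keeps the original pointer for kfree().
 */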
2827 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
2828 base->num_phy_chans +
2831 if (!base->lcla_pool.base_unaligned) {
2836 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
2840 writel(virt_to_phys(base->lcla_pool.base),
2841 base->virtbase + D40_DREG_LCLA);
2847 static int __init d40_probe(struct platform_device *pdev)
2851 struct d40_base *base;
2852 struct resource *res = NULL;
2853 int num_reserved_chans;
2856 base = d40_hw_detect_init(pdev);
2861 num_reserved_chans = d40_phy_res_init(base);
2863 platform_set_drvdata(pdev, base);
2865 spin_lock_init(&base->interrupt_lock);
2866 spin_lock_init(&base->execmd_lock);
2868 /* Get IO for logical channel parameter address */
2869 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2872 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
2875 base->lcpa_size = resource_size(res);
2876 base->phy_lcpa = res->start;
2878 if (request_mem_region(res->start, resource_size(res),
2879 D40_NAME " I/O lcpa") == NULL) {
2882 "Failed to request LCPA region 0x%x-0x%x\n",
2883 res->start, res->end);
2887 /* We make use of ESRAM memory for this. */
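/*
 * D40_DREG_LCPA may already have been programmed, e.g. by boot code;
 * warn if it disagrees with the "lcpa" resource and then point the
 * controller at our own LCPA base address.
 */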
2888 val = readl(base->virtbase + D40_DREG_LCPA);
2889 if (res->start != val && val != 0) {
2890 dev_warn(&pdev->dev,
2891 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2892 __func__, val, res->start);
2894 writel(res->start, base->virtbase + D40_DREG_LCPA);
2896 base->lcpa_base = ioremap(res->start, resource_size(res));
2897 if (!base->lcpa_base) {
2899 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
2903 ret = d40_lcla_allocate(base);
2905 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
2909 spin_lock_init(&base->lcla_pool.lock);
2911 base->irq = platform_get_irq(pdev, 0);
2913 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2915 d40_err(&pdev->dev, "No IRQ defined\n");
2919 err = d40_dmaengine_init(base, num_reserved_chans);
2925 dev_info(base->dev, "initialized\n");
2930 if (base->desc_slab)
2931 kmem_cache_destroy(base->desc_slab);
2933 iounmap(base->virtbase);
2934 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2935 free_pages((unsigned long)base->lcla_pool.base,
2936 base->lcla_pool.pages);
2938 kfree(base->lcla_pool.base_unaligned);
2941 release_mem_region(base->phy_lcpa,
2943 if (base->phy_start)
2944 release_mem_region(base->phy_start,
2947 clk_disable(base->clk);
2951 kfree(base->lcla_pool.alloc_map);
2952 kfree(base->lookup_log_chans);
2953 kfree(base->lookup_phy_chans);
2954 kfree(base->phy_res);
2958 d40_err(&pdev->dev, "probe failed\n");
2962 static struct platform_driver d40_driver = {
2964 .owner = THIS_MODULE,
2969 static int __init stedma40_init(void)
2971 return platform_driver_probe(&d40_driver, d40_probe);
2973 arch_initcall(stedma40_init);