2 * Copyright (C) Ericsson AB 2007-2008
3 * Copyright (C) ST-Ericsson SA 2008-2010
4 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
5 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
6 * License terms: GNU General Public License (GPL) version 2
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/dmaengine.h>
12 #include <linux/platform_device.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/err.h>
17 #include <plat/ste_dma40.h>
19 #include "ste_dma40_ll.h"
21 #define D40_NAME "dma40"
23 #define D40_PHY_CHAN -1
25 /* For masking out/in 2 bit channel positions */
26 #define D40_CHAN_POS(chan) (2 * (chan / 2))
27 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
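/*
 * Example: each physical channel occupies a 2-bit field, with even-numbered
 * channels in one status/command register and odd-numbered channels in its
 * sibling. For channel 5, D40_CHAN_POS(5) = 2 * (5 / 2) = 4, so its field
 * sits at bits 5:4 and D40_CHAN_POS_MASK(5) = 0x3 << 4 = 0x30.
 */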
29 /* Maximum iterations taken before giving up suspending a channel */
30 #define D40_SUSPEND_MAX_IT 500
32 /* Hardware requirement on LCLA alignment */
33 #define LCLA_ALIGNMENT 0x40000
35 /* Max number of links per event group */
36 #define D40_LCLA_LINK_PER_EVENT_GRP 128
37 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
39 /* Attempts before giving up on trying to get pages that are aligned */
40 #define MAX_LCLA_ALLOC_ATTEMPTS 256
42 /* Bit markings for allocation map */
43 #define D40_ALLOC_FREE (1 << 31)
44 #define D40_ALLOC_PHY (1 << 30)
45 #define D40_ALLOC_LOG_FREE 0
47 /* Hardware designer of the block */
48 #define D40_HW_DESIGNER 0x8
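/* Checked against the DESIGNER field of D40_DREG_PERIPHID2 at probe time, see d40_hw_detect_init(). */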
51 * enum d40_command - The different commands and/or statuses.
53 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
54 * @D40_DMA_RUN: The DMA channel is RUNNING or the command is RUN.
55 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
56 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
61 D40_DMA_SUSPEND_REQ = 2,
66 * struct d40_lli_pool - Structure for keeping LLIs in memory
68 * @base: Pointer to a memory area used when pre_alloc_lli is not large
69 * enough, i.e. the transfer is bigger than the most common case of 1 dst and
70 * 1 src. NULL if pre_alloc_lli is used.
71 * @dma_addr: DMA address, if mapped
72 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
73 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
74 * one buffer to one buffer.
80 /* Space for dst and src, plus an extra for padding */
81 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
85 * struct d40_desc - A descriptor is one DMA job.
87 * @lli_phy: LLI settings for physical channel. Both src and dst
88 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
90 * @lli_log: Same as above but for logical channels.
91 * @lli_pool: The pool with two entries pre-allocated.
92 * @lli_len: Number of llis of current descriptor.
93 * @lli_current: Number of transferred llis.
94 * @lcla_alloc: Number of LCLA entries allocated.
95 * @txd: DMA engine struct. Used, among other things, for communication
98 * @is_in_client_list: true if the client owns this descriptor.
101 * This descriptor is used for both logical and physical transfers.
105 struct d40_phy_lli_bidir lli_phy;
107 struct d40_log_lli_bidir lli_log;
109 struct d40_lli_pool lli_pool;
114 struct dma_async_tx_descriptor txd;
115 struct list_head node;
117 bool is_in_client_list;
121 * struct d40_lcla_pool - LCLA pool settings and data.
123 * @base: The virtual address of LCLA. 18 bit aligned.
124 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
125 * This pointer is only there for clean-up on error.
126 * @pages: The number of pages needed for all physical channels.
127 * Only used later for clean-up on error
128 * @lock: Lock to protect the content in this struct.
129 * @alloc_map: Map showing which LCLA entry is owned by which job.
131 struct d40_lcla_pool {
134 void *base_unaligned;
137 struct d40_desc **alloc_map;
141 * struct d40_phy_res - struct for handling eventlines mapped to physical
144 * @lock: A lock protecting this entity.
145 * @num: The physical channel number of this entity.
146 * @allocated_src: Bit map showing which src event lines are mapped to
147 * this physical channel. Can also be free or physically allocated.
148 * @allocated_dst: Same as for src but for dst.
149 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
162 * struct d40_chan - Struct that describes a channel.
164 * @lock: A spinlock to protect this struct.
165 * @log_num: The logical number, if any, of this channel.
166 * @completed: Starts with 1; after the first interrupt it is set to the dma engine's
168 * @pending_tx: The number of pending transfers. Used between interrupt handler
170 * @busy: Set to true when transfer is ongoing on this channel.
171 * @phy_chan: Pointer to physical channel which this instance runs on. If this
172 * pointer is NULL, then the channel is not allocated.
173 * @chan: DMA engine handle.
174 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
175 * transfer and call client callback.
176 * @client: Client owned descriptor list.
177 * @active: Active descriptor.
178 * @queue: Queued jobs.
179 * @dma_cfg: The client configuration of this dma channel.
180 * @configured: whether the dma_cfg configuration is valid
181 * @base: Pointer to the device instance struct.
182 * @src_def_cfg: Default cfg register setting for src.
183 * @dst_def_cfg: Default cfg register setting for dst.
184 * @log_def: Default logical channel settings.
185 * @lcla: Space for one dst src pair for logical channel transfers.
186 * @lcpa: Pointer to dst and src lcpa settings.
188 * This struct can either "be" a logical or a physical channel.
193 /* ID of the most recently completed transfer */
197 struct d40_phy_res *phy_chan;
198 struct dma_chan chan;
199 struct tasklet_struct tasklet;
200 struct list_head client;
201 struct list_head active;
202 struct list_head queue;
203 struct stedma40_chan_cfg dma_cfg;
205 struct d40_base *base;
206 /* Default register configurations */
209 struct d40_def_lcsp log_def;
210 struct d40_log_lli_full *lcpa;
211 /* Runtime reconfiguration */
212 dma_addr_t runtime_addr;
213 enum dma_data_direction runtime_direction;
217 * struct d40_base - The big global struct, one for each probed instance.
219 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
220 * @execmd_lock: Lock for execute command usage since several channels share
221 * the same physical register.
222 * @dev: The device structure.
223 * @virtbase: The virtual base address of the DMA's registers.
224 * @rev: silicon revision detected.
225 * @clk: Pointer to the DMA clock structure.
226 * @phy_start: Physical memory start of the DMA registers.
227 * @phy_size: Size of the DMA register map.
228 * @irq: The IRQ number.
229 * @num_phy_chans: The number of physical channels. Read from HW. This
230 * is the number of available channels for this driver, not counting "Secure
231 * mode" allocated physical channels.
232 * @num_log_chans: The number of logical channels. Calculated from
234 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
235 * @dma_slave: dma_device channels that can only do slave transfers.
236 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
237 * @log_chans: Room for all possible logical channels in system.
238 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
239 * to log_chans entries.
240 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
241 * to phy_chans entries.
242 * @plat_data: Pointer to provided platform_data which is the driver
244 * @phy_res: Vector containing all physical channels.
245 * @lcla_pool: lcla pool settings and data.
246 * @lcpa_base: The virtual mapped address of LCPA.
247 * @phy_lcpa: The physical address of the LCPA.
248 * @lcpa_size: The size of the LCPA area.
249 * @desc_slab: cache for descriptors.
252 spinlock_t interrupt_lock;
253 spinlock_t execmd_lock;
255 void __iomem *virtbase;
258 phys_addr_t phy_start;
259 resource_size_t phy_size;
263 struct dma_device dma_both;
264 struct dma_device dma_slave;
265 struct dma_device dma_memcpy;
266 struct d40_chan *phy_chans;
267 struct d40_chan *log_chans;
268 struct d40_chan **lookup_log_chans;
269 struct d40_chan **lookup_phy_chans;
270 struct stedma40_platform_data *plat_data;
271 /* Physical half channels */
272 struct d40_phy_res *phy_res;
273 struct d40_lcla_pool lcla_pool;
276 resource_size_t lcpa_size;
277 struct kmem_cache *desc_slab;
281 * struct d40_interrupt_lookup - lookup table for interrupt handler
283 * @src: Interrupt mask register.
284 * @clr: Interrupt clear register.
285 * @is_error: true if this is an error interrupt.
286 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
287 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
289 struct d40_interrupt_lookup {
297 * struct d40_reg_val - simple lookup struct
299 * @reg: The register.
300 * @val: The value that belongs to the register in reg.
307 static struct device *chan2dev(struct d40_chan *d40c)
309 return &d40c->chan.dev->device;
312 static bool chan_is_physical(struct d40_chan *chan)
314 return chan->log_num == D40_PHY_CHAN;
317 static bool chan_is_logical(struct d40_chan *chan)
319 return !chan_is_physical(chan);
322 static void __iomem *chan_base(struct d40_chan *chan)
324 return chan->base->virtbase + D40_DREG_PCBASE +
325 chan->phy_chan->num * D40_DREG_PCDELTA;
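/*
 * The per-channel registers of physical channel N start at
 * virtbase + D40_DREG_PCBASE + N * D40_DREG_PCDELTA; for example,
 * writel(cfg, chan_base(chan) + D40_CHAN_REG_SSCFG) programs the source
 * configuration register of that channel.
 */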
328 #define d40_err(dev, format, arg...) \
329 dev_err(dev, "[%s] " format, __func__, ## arg)
331 #define chan_err(d40c, format, arg...) \
332 d40_err(chan2dev(d40c), format, ## arg)
334 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
335 int lli_len, bool is_log)
341 align = sizeof(struct d40_log_lli);
343 align = sizeof(struct d40_phy_lli);
346 base = d40d->lli_pool.pre_alloc_lli;
347 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
348 d40d->lli_pool.base = NULL;
350 d40d->lli_pool.size = lli_len * 2 * align;
352 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
353 d40d->lli_pool.base = base;
355 if (d40d->lli_pool.base == NULL)
360 d40d->lli_log.src = PTR_ALIGN(base, align);
361 d40d->lli_log.dst = d40d->lli_log.src + lli_len;
363 d40d->lli_pool.dma_addr = 0;
365 d40d->lli_phy.src = PTR_ALIGN(base, align);
366 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
368 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
373 if (dma_mapping_error(d40c->base->dev,
374 d40d->lli_pool.dma_addr)) {
375 kfree(d40d->lli_pool.base);
376 d40d->lli_pool.base = NULL;
377 d40d->lli_pool.dma_addr = 0;
385 static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
387 if (d40d->lli_pool.dma_addr)
388 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
389 d40d->lli_pool.size, DMA_TO_DEVICE);
391 kfree(d40d->lli_pool.base);
392 d40d->lli_pool.base = NULL;
393 d40d->lli_pool.size = 0;
394 d40d->lli_log.src = NULL;
395 d40d->lli_log.dst = NULL;
396 d40d->lli_phy.src = NULL;
397 d40d->lli_phy.dst = NULL;
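/*
 * Pool layout: for the common single-LLI case (one src plus one dst entry)
 * the LLIs live in pre_alloc_lli and base stays NULL; longer transfers
 * kmalloc lli_len * 2 * align bytes (plus align for PTR_ALIGN), laying out
 * the src LLIs followed by the dst LLIs. For physical channels the area is
 * additionally mapped with dma_map_single() and synced before use.
 */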
400 static int d40_lcla_alloc_one(struct d40_chan *d40c,
401 struct d40_desc *d40d)
408 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
410 p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
413 * Allocate both src and dst at the same time, therefore the half
414 * starts at 1 since 0 can't be used; zero is used as the end marker.
416 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
417 if (!d40c->base->lcla_pool.alloc_map[p + i]) {
418 d40c->base->lcla_pool.alloc_map[p + i] = d40d;
425 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
430 static int d40_lcla_free_all(struct d40_chan *d40c,
431 struct d40_desc *d40d)
437 if (chan_is_physical(d40c))
440 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
442 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
443 if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
444 D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
445 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
446 D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
448 if (d40d->lcla_alloc == 0) {
455 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
461 static void d40_desc_remove(struct d40_desc *d40d)
463 list_del(&d40d->node);
466 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
468 struct d40_desc *desc = NULL;
470 if (!list_empty(&d40c->client)) {
474 list_for_each_entry_safe(d, _d, &d40c->client, node)
475 if (async_tx_test_ack(&d->txd)) {
476 d40_pool_lli_free(d40c, d);
479 memset(desc, 0, sizeof(*desc));
485 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
488 INIT_LIST_HEAD(&desc->node);
493 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
496 d40_pool_lli_free(d40c, d40d);
497 d40_lcla_free_all(d40c, d40d);
498 kmem_cache_free(d40c->base->desc_slab, d40d);
501 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
503 list_add_tail(&desc->node, &d40c->active);
506 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
508 int curr_lcla = -EINVAL, next_lcla;
510 if (chan_is_physical(d40c)) {
511 d40_phy_lli_write(d40c->base->virtbase,
515 d40d->lli_current = d40d->lli_len;
518 if ((d40d->lli_len - d40d->lli_current) > 1)
519 curr_lcla = d40_lcla_alloc_one(d40c, d40d);
521 d40_log_lli_lcpa_write(d40c->lcpa,
522 &d40d->lli_log.dst[d40d->lli_current],
523 &d40d->lli_log.src[d40d->lli_current],
527 for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
528 unsigned int lcla_offset = d40c->phy_chan->num * 1024 +
530 struct d40_lcla_pool *pool = &d40c->base->lcla_pool;
531 struct d40_log_lli *lcla = pool->base + lcla_offset;
533 if (d40d->lli_current + 1 < d40d->lli_len)
534 next_lcla = d40_lcla_alloc_one(d40c, d40d);
538 d40_log_lli_lcla_write(lcla,
539 &d40d->lli_log.dst[d40d->lli_current],
540 &d40d->lli_log.src[d40d->lli_current],
543 dma_sync_single_range_for_device(d40c->base->dev,
544 pool->dma_addr, lcla_offset,
545 2 * sizeof(struct d40_log_lli),
548 curr_lcla = next_lcla;
550 if (curr_lcla == -EINVAL) {
559 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
563 if (list_empty(&d40c->active))
566 d = list_first_entry(&d40c->active,
572 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
574 list_add_tail(&desc->node, &d40c->queue);
577 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
581 if (list_empty(&d40c->queue))
584 d = list_first_entry(&d40c->queue,
590 static int d40_psize_2_burst_size(bool is_log, int psize)
593 if (psize == STEDMA40_PSIZE_LOG_1)
596 if (psize == STEDMA40_PSIZE_PHY_1)
604 * The dma only supports transferring packets of up to
605 * STEDMA40_MAX_SEG_SIZE << data_width bytes. Calculate the total number of
606 * dma elements required to send the entire sg list.
608 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
611 u32 max_w = max(data_width1, data_width2);
612 u32 min_w = min(data_width1, data_width2);
613 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
615 if (seg_max > STEDMA40_MAX_SEG_SIZE)
616 seg_max -= (1 << max_w);
618 if (!IS_ALIGNED(size, 1 << max_w))
624 dmalen = size / seg_max;
625 if (dmalen * seg_max < size)
631 static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
632 u32 data_width1, u32 data_width2)
634 struct scatterlist *sg;
639 for_each_sg(sgl, sg, sg_len, i) {
640 ret = d40_size_2_dmalen(sg_dma_len(sg),
641 data_width1, data_width2);
649 /* Support functions for logical channels */
651 static int d40_channel_execute_command(struct d40_chan *d40c,
652 enum d40_command command)
656 void __iomem *active_reg;
661 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
663 if (d40c->phy_chan->num % 2 == 0)
664 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
666 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
668 if (command == D40_DMA_SUSPEND_REQ) {
669 status = (readl(active_reg) &
670 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
671 D40_CHAN_POS(d40c->phy_chan->num);
673 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
677 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
678 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
681 if (command == D40_DMA_SUSPEND_REQ) {
683 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
684 status = (readl(active_reg) &
685 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
686 D40_CHAN_POS(d40c->phy_chan->num);
690 * Reduce the number of bus accesses while
691 * waiting for the DMA to suspend.
695 if (status == D40_DMA_STOP ||
696 status == D40_DMA_SUSPENDED)
700 if (i == D40_SUSPEND_MAX_IT) {
702 "unable to suspend the chl %d (log: %d) status %x\n",
703 d40c->phy_chan->num, d40c->log_num,
711 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
715 static void d40_term_all(struct d40_chan *d40c)
717 struct d40_desc *d40d;
719 /* Release active descriptors */
720 while ((d40d = d40_first_active_get(d40c))) {
721 d40_desc_remove(d40d);
722 d40_desc_free(d40c, d40d);
725 /* Release queued descriptors waiting for transfer */
726 while ((d40d = d40_first_queued(d40c))) {
727 d40_desc_remove(d40d);
728 d40_desc_free(d40c, d40d);
732 d40c->pending_tx = 0;
736 static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
739 void __iomem *addr = chan_base(d40c) + reg;
743 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
744 | ~D40_EVENTLINE_MASK(event), addr);
749 * The hardware sometimes doesn't register the enable when src and dst
750 * event lines are active on the same logical channel. Retry to ensure
751 * it does. Usually only one retry is sufficient.
755 writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
756 | ~D40_EVENTLINE_MASK(event), addr);
758 if (readl(addr) & D40_EVENTLINE_MASK(event))
763 dev_dbg(chan2dev(d40c),
764 "[%s] workaround enable S%cLNK (%d tries)\n",
765 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
771 static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
775 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
777 /* Enable event line connected to device (or memcpy) */
778 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
779 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
780 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
782 __d40_config_set_event(d40c, do_enable, event,
786 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
787 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
789 __d40_config_set_event(d40c, do_enable, event,
793 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
796 static u32 d40_chan_has_events(struct d40_chan *d40c)
798 void __iomem *chanbase = chan_base(d40c);
801 val = readl(chanbase + D40_CHAN_REG_SSLNK);
802 val |= readl(chanbase + D40_CHAN_REG_SDLNK);
807 static u32 d40_get_prmo(struct d40_chan *d40c)
809 static const unsigned int phy_map[] = {
810 [STEDMA40_PCHAN_BASIC_MODE]
811 = D40_DREG_PRMO_PCHAN_BASIC,
812 [STEDMA40_PCHAN_MODULO_MODE]
813 = D40_DREG_PRMO_PCHAN_MODULO,
814 [STEDMA40_PCHAN_DOUBLE_DST_MODE]
815 = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
817 static const unsigned int log_map[] = {
818 [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
819 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
820 [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
821 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
822 [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
823 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
826 if (chan_is_physical(d40c))
827 return phy_map[d40c->dma_cfg.mode_opt];
829 return log_map[d40c->dma_cfg.mode_opt];
832 static void d40_config_write(struct d40_chan *d40c)
837 /* Odd addresses are even addresses + 4 */
838 addr_base = (d40c->phy_chan->num % 2) * 4;
839 /* Setup channel mode to logical or physical */
840 var = ((u32)(chan_is_logical(d40c)) + 1) <<
841 D40_CHAN_POS(d40c->phy_chan->num);
842 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
844 /* Setup operational mode option register */
845 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
847 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
849 if (chan_is_logical(d40c)) {
850 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
851 & D40_SREG_ELEM_LOG_LIDX_MASK;
852 void __iomem *chanbase = chan_base(d40c);
854 /* Set default config for CFG reg */
855 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
856 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
858 /* Set LIDX for lcla */
859 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
860 writel(lidx, chanbase + D40_CHAN_REG_SDELT);
864 static u32 d40_residue(struct d40_chan *d40c)
868 if (chan_is_logical(d40c))
869 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
870 >> D40_MEM_LCSP2_ECNT_POS;
872 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
873 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
874 >> D40_SREG_ELEM_PHY_ECNT_POS;
877 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
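/*
 * Example: with a 32-bit (word) dst data width each element is 4 bytes, so
 * 10 remaining elements give a residue of 40 bytes.
 */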
880 static bool d40_tx_is_linked(struct d40_chan *d40c)
884 if (chan_is_logical(d40c))
885 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
887 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
888 & D40_SREG_LNK_PHYS_LNK_MASK;
893 static int d40_pause(struct dma_chan *chan)
895 struct d40_chan *d40c =
896 container_of(chan, struct d40_chan, chan);
903 spin_lock_irqsave(&d40c->lock, flags);
905 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
907 if (chan_is_logical(d40c)) {
908 d40_config_set_event(d40c, false);
909 /* Resume the other logical channels if any */
910 if (d40_chan_has_events(d40c))
911 res = d40_channel_execute_command(d40c,
916 spin_unlock_irqrestore(&d40c->lock, flags);
920 static int d40_resume(struct dma_chan *chan)
922 struct d40_chan *d40c =
923 container_of(chan, struct d40_chan, chan);
930 spin_lock_irqsave(&d40c->lock, flags);
932 if (d40c->base->rev == 0)
933 if (chan_is_logical(d40c)) {
934 res = d40_channel_execute_command(d40c,
935 D40_DMA_SUSPEND_REQ);
939 /* If bytes are left to transfer or a tx is linked, resume the job */
940 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
942 if (chan_is_logical(d40c))
943 d40_config_set_event(d40c, true);
945 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
949 spin_unlock_irqrestore(&d40c->lock, flags);
953 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
955 struct d40_chan *d40c = container_of(tx->chan,
958 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
961 spin_lock_irqsave(&d40c->lock, flags);
965 if (d40c->chan.cookie < 0)
966 d40c->chan.cookie = 1;
968 d40d->txd.cookie = d40c->chan.cookie;
970 d40_desc_queue(d40c, d40d);
972 spin_unlock_irqrestore(&d40c->lock, flags);
977 static int d40_start(struct d40_chan *d40c)
979 if (d40c->base->rev == 0) {
982 if (chan_is_logical(d40c)) {
983 err = d40_channel_execute_command(d40c,
984 D40_DMA_SUSPEND_REQ);
990 if (chan_is_logical(d40c))
991 d40_config_set_event(d40c, true);
993 return d40_channel_execute_command(d40c, D40_DMA_RUN);
996 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
998 struct d40_desc *d40d;
1001 /* Start queued jobs, if any */
1002 d40d = d40_first_queued(d40c);
1007 /* Remove from queue */
1008 d40_desc_remove(d40d);
1010 /* Add to active queue */
1011 d40_desc_submit(d40c, d40d);
1013 /* Initiate DMA job */
1014 d40_desc_load(d40c, d40d);
1017 err = d40_start(d40c);
1026 /* called from interrupt context */
1027 static void dma_tc_handle(struct d40_chan *d40c)
1029 struct d40_desc *d40d;
1031 /* Get first active entry from list */
1032 d40d = d40_first_active_get(d40c);
1037 d40_lcla_free_all(d40c, d40d);
1039 if (d40d->lli_current < d40d->lli_len) {
1040 d40_desc_load(d40c, d40d);
1042 (void) d40_start(d40c);
1046 if (d40_queue_start(d40c) == NULL)
1050 tasklet_schedule(&d40c->tasklet);
1054 static void dma_tasklet(unsigned long data)
1056 struct d40_chan *d40c = (struct d40_chan *) data;
1057 struct d40_desc *d40d;
1058 unsigned long flags;
1059 dma_async_tx_callback callback;
1060 void *callback_param;
1062 spin_lock_irqsave(&d40c->lock, flags);
1064 /* Get first active entry from list */
1065 d40d = d40_first_active_get(d40c);
1070 d40c->completed = d40d->txd.cookie;
1073 * If terminating a channel pending_tx is set to zero.
1074 * This prevents any finished active jobs from returning to the client.
1076 if (d40c->pending_tx == 0) {
1077 spin_unlock_irqrestore(&d40c->lock, flags);
1081 /* Callback to client */
1082 callback = d40d->txd.callback;
1083 callback_param = d40d->txd.callback_param;
1085 if (async_tx_test_ack(&d40d->txd)) {
1086 d40_pool_lli_free(d40c, d40d);
1087 d40_desc_remove(d40d);
1088 d40_desc_free(d40c, d40d);
1090 if (!d40d->is_in_client_list) {
1091 d40_desc_remove(d40d);
1092 d40_lcla_free_all(d40c, d40d);
1093 list_add_tail(&d40d->node, &d40c->client);
1094 d40d->is_in_client_list = true;
1100 if (d40c->pending_tx)
1101 tasklet_schedule(&d40c->tasklet);
1103 spin_unlock_irqrestore(&d40c->lock, flags);
1105 if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
1106 callback(callback_param);
1111 /* Rescue maneuver if receiving double interrupts */
1112 if (d40c->pending_tx > 0)
1114 spin_unlock_irqrestore(&d40c->lock, flags);
1117 static irqreturn_t d40_handle_interrupt(int irq, void *data)
1119 static const struct d40_interrupt_lookup il[] = {
1120 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
1121 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
1122 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
1123 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
1124 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
1125 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
1126 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
1127 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
1128 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
1129 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
1133 u32 regs[ARRAY_SIZE(il)];
1137 struct d40_chan *d40c;
1138 unsigned long flags;
1139 struct d40_base *base = data;
1141 spin_lock_irqsave(&base->interrupt_lock, flags);
1143 /* Read interrupt status of both logical and physical channels */
1144 for (i = 0; i < ARRAY_SIZE(il); i++)
1145 regs[i] = readl(base->virtbase + il[i].src);
1149 chan = find_next_bit((unsigned long *)regs,
1150 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
1152 /* No more set bits found? */
1153 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
1156 row = chan / BITS_PER_LONG;
1157 idx = chan & (BITS_PER_LONG - 1);
1160 writel(1 << idx, base->virtbase + il[row].clr);
1162 if (il[row].offset == D40_PHY_CHAN)
1163 d40c = base->lookup_phy_chans[idx];
1165 d40c = base->lookup_log_chans[il[row].offset + idx];
1166 spin_lock(&d40c->lock);
1168 if (!il[row].is_error)
1169 dma_tc_handle(d40c);
1171 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1172 chan, il[row].offset, idx);
1174 spin_unlock(&d40c->lock);
1177 spin_unlock_irqrestore(&base->interrupt_lock, flags);
1182 static int d40_validate_conf(struct d40_chan *d40c,
1183 struct stedma40_chan_cfg *conf)
1186 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
1187 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
1188 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1191 chan_err(d40c, "Invalid direction.\n");
1195 if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
1196 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
1197 d40c->runtime_addr == 0) {
1199 chan_err(d40c, "Invalid TX channel address (%d)\n",
1200 conf->dst_dev_type);
1204 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
1205 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
1206 d40c->runtime_addr == 0) {
1207 chan_err(d40c, "Invalid RX channel address (%d)\n",
1208 conf->src_dev_type);
1212 if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
1213 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1214 chan_err(d40c, "Invalid dst\n");
1218 if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
1219 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1220 chan_err(d40c, "Invalid src\n");
1224 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1225 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1226 chan_err(d40c, "No event line\n");
1230 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1231 (src_event_group != dst_event_group)) {
1232 chan_err(d40c, "Invalid event group\n");
1236 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1238 * DMAC HW supports it. Will be added to this driver,
1239 * in case any dma client requires it.
1241 chan_err(d40c, "periph to periph not supported\n");
1245 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1246 (1 << conf->src_info.data_width) !=
1247 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1248 (1 << conf->dst_info.data_width)) {
1250 * The DMAC hardware only supports
1251 * src (burst x width) == dst (burst x width)
1254 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1261 static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
1262 int log_event_line, bool is_log)
1264 unsigned long flags;
1265 spin_lock_irqsave(&phy->lock, flags);
1267 /* Physical interrupts are masked per physical full channel */
1268 if (phy->allocated_src == D40_ALLOC_FREE &&
1269 phy->allocated_dst == D40_ALLOC_FREE) {
1270 phy->allocated_dst = D40_ALLOC_PHY;
1271 phy->allocated_src = D40_ALLOC_PHY;
1277 /* Logical channel */
1279 if (phy->allocated_src == D40_ALLOC_PHY)
1282 if (phy->allocated_src == D40_ALLOC_FREE)
1283 phy->allocated_src = D40_ALLOC_LOG_FREE;
1285 if (!(phy->allocated_src & (1 << log_event_line))) {
1286 phy->allocated_src |= 1 << log_event_line;
1291 if (phy->allocated_dst == D40_ALLOC_PHY)
1294 if (phy->allocated_dst == D40_ALLOC_FREE)
1295 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1297 if (!(phy->allocated_dst & (1 << log_event_line))) {
1298 phy->allocated_dst |= 1 << log_event_line;
1305 spin_unlock_irqrestore(&phy->lock, flags);
1308 spin_unlock_irqrestore(&phy->lock, flags);
1312 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1315 unsigned long flags;
1316 bool is_free = false;
1318 spin_lock_irqsave(&phy->lock, flags);
1319 if (!log_event_line) {
1320 phy->allocated_dst = D40_ALLOC_FREE;
1321 phy->allocated_src = D40_ALLOC_FREE;
1326 /* Logical channel */
1328 phy->allocated_src &= ~(1 << log_event_line);
1329 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1330 phy->allocated_src = D40_ALLOC_FREE;
1332 phy->allocated_dst &= ~(1 << log_event_line);
1333 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1334 phy->allocated_dst = D40_ALLOC_FREE;
1337 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1341 spin_unlock_irqrestore(&phy->lock, flags);
1346 static int d40_allocate_channel(struct d40_chan *d40c)
1351 struct d40_phy_res *phys;
1356 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1358 phys = d40c->base->phy_res;
1360 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1361 dev_type = d40c->dma_cfg.src_dev_type;
1362 log_num = 2 * dev_type;
1364 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1365 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1366 /* dst event lines are used for logical memcpy */
1367 dev_type = d40c->dma_cfg.dst_dev_type;
1368 log_num = 2 * dev_type + 1;
1373 event_group = D40_TYPE_TO_GROUP(dev_type);
1374 event_line = D40_TYPE_TO_EVENT(dev_type);
1377 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1378 /* Find physical half channel */
1379 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1381 if (d40_alloc_mask_set(&phys[i], is_src,
1386 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1387 int phy_num = j + event_group * 2;
1388 for (i = phy_num; i < phy_num + 2; i++) {
1389 if (d40_alloc_mask_set(&phys[i],
1398 d40c->phy_chan = &phys[i];
1399 d40c->log_num = D40_PHY_CHAN;
1405 /* Find logical channel */
1406 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1407 int phy_num = j + event_group * 2;
1409 * Spread logical channels across all available physical rather
1410 * than pack every logical channel at the first available phy
1414 for (i = phy_num; i < phy_num + 2; i++) {
1415 if (d40_alloc_mask_set(&phys[i], is_src,
1416 event_line, is_log))
1420 for (i = phy_num + 1; i >= phy_num; i--) {
1421 if (d40_alloc_mask_set(&phys[i], is_src,
1422 event_line, is_log))
1430 d40c->phy_chan = &phys[i];
1431 d40c->log_num = log_num;
1435 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1437 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1443 static int d40_config_memcpy(struct d40_chan *d40c)
1445 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1447 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1448 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1449 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1450 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1451 memcpy[d40c->chan.chan_id];
1453 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1454 dma_has_cap(DMA_SLAVE, cap)) {
1455 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1457 chan_err(d40c, "No memcpy\n");
1465 static int d40_free_dma(struct d40_chan *d40c)
1470 struct d40_phy_res *phy = d40c->phy_chan;
1473 struct d40_desc *_d;
1476 /* Terminate all queued and active transfers */
1479 /* Release client owned descriptors */
1480 if (!list_empty(&d40c->client))
1481 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1482 d40_pool_lli_free(d40c, d);
1484 d40_desc_free(d40c, d);
1488 chan_err(d40c, "phy == null\n");
1492 if (phy->allocated_src == D40_ALLOC_FREE &&
1493 phy->allocated_dst == D40_ALLOC_FREE) {
1494 chan_err(d40c, "channel already free\n");
1498 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1499 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1500 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1502 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1503 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1506 chan_err(d40c, "Unknown direction\n");
1510 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1512 chan_err(d40c, "suspend failed\n");
1516 if (chan_is_logical(d40c)) {
1517 /* Release logical channel, deactivate the event line */
1519 d40_config_set_event(d40c, false);
1520 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1523 * Check if there are more logical allocations
1524 * on this phy channel.
1526 if (!d40_alloc_mask_free(phy, is_src, event)) {
1527 /* Resume the other logical channels if any */
1528 if (d40_chan_has_events(d40c)) {
1529 res = d40_channel_execute_command(d40c,
1533 "Executing RUN command\n");
1540 (void) d40_alloc_mask_free(phy, is_src, 0);
1543 /* Release physical channel */
1544 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1546 chan_err(d40c, "Failed to stop channel\n");
1549 d40c->phy_chan = NULL;
1550 d40c->configured = false;
1551 d40c->base->lookup_phy_chans[phy->num] = NULL;
1556 static bool d40_is_paused(struct d40_chan *d40c)
1558 void __iomem *chanbase = chan_base(d40c);
1559 bool is_paused = false;
1560 unsigned long flags;
1561 void __iomem *active_reg;
1565 spin_lock_irqsave(&d40c->lock, flags);
1567 if (chan_is_physical(d40c)) {
1568 if (d40c->phy_chan->num % 2 == 0)
1569 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1571 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1573 status = (readl(active_reg) &
1574 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1575 D40_CHAN_POS(d40c->phy_chan->num);
1576 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1582 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1583 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1584 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1585 status = readl(chanbase + D40_CHAN_REG_SDLNK);
1586 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1587 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1588 status = readl(chanbase + D40_CHAN_REG_SSLNK);
1590 chan_err(d40c, "Unknown direction\n");
1594 status = (status & D40_EVENTLINE_MASK(event)) >>
1595 D40_EVENTLINE_POS(event);
1597 if (status != D40_DMA_RUN)
1600 spin_unlock_irqrestore(&d40c->lock, flags);
1606 static u32 stedma40_residue(struct dma_chan *chan)
1608 struct d40_chan *d40c =
1609 container_of(chan, struct d40_chan, chan);
1611 unsigned long flags;
1613 spin_lock_irqsave(&d40c->lock, flags);
1614 bytes_left = d40_residue(d40c);
1615 spin_unlock_irqrestore(&d40c->lock, flags);
1620 struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1621 struct scatterlist *sgl_dst,
1622 struct scatterlist *sgl_src,
1623 unsigned int sgl_len,
1624 unsigned long dma_flags)
1627 struct d40_desc *d40d;
1628 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1630 unsigned long flags;
1632 if (d40c->phy_chan == NULL) {
1633 chan_err(d40c, "Unallocated channel.\n");
1634 return ERR_PTR(-EINVAL);
1637 spin_lock_irqsave(&d40c->lock, flags);
1638 d40d = d40_desc_get(d40c);
1643 d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
1644 d40c->dma_cfg.src_info.data_width,
1645 d40c->dma_cfg.dst_info.data_width);
1646 if (d40d->lli_len < 0) {
1647 chan_err(d40c, "Unaligned size\n");
1651 d40d->lli_current = 0;
1652 d40d->txd.flags = dma_flags;
1654 if (chan_is_logical(d40c)) {
1656 if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) {
1657 chan_err(d40c, "Out of memory\n");
1661 (void) d40_log_sg_to_lli(sgl_src,
1664 d40c->log_def.lcsp1,
1665 d40c->dma_cfg.src_info.data_width,
1666 d40c->dma_cfg.dst_info.data_width);
1668 (void) d40_log_sg_to_lli(sgl_dst,
1671 d40c->log_def.lcsp3,
1672 d40c->dma_cfg.dst_info.data_width,
1673 d40c->dma_cfg.src_info.data_width);
1675 if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) {
1676 chan_err(d40c, "Out of memory\n");
1680 res = d40_phy_sg_to_lli(sgl_src,
1684 virt_to_phys(d40d->lli_phy.src),
1686 d40c->dma_cfg.src_info.data_width,
1687 d40c->dma_cfg.dst_info.data_width,
1688 d40c->dma_cfg.src_info.psize);
1693 res = d40_phy_sg_to_lli(sgl_dst,
1697 virt_to_phys(d40d->lli_phy.dst),
1699 d40c->dma_cfg.dst_info.data_width,
1700 d40c->dma_cfg.src_info.data_width,
1701 d40c->dma_cfg.dst_info.psize);
1706 dma_sync_single_for_device(d40c->base->dev,
1707 d40d->lli_pool.dma_addr,
1708 d40d->lli_pool.size, DMA_TO_DEVICE);
1711 dma_async_tx_descriptor_init(&d40d->txd, chan);
1713 d40d->txd.tx_submit = d40_tx_submit;
1715 spin_unlock_irqrestore(&d40c->lock, flags);
1720 d40_desc_free(d40c, d40d);
1721 spin_unlock_irqrestore(&d40c->lock, flags);
1724 EXPORT_SYMBOL(stedma40_memcpy_sg);
1726 bool stedma40_filter(struct dma_chan *chan, void *data)
1728 struct stedma40_chan_cfg *info = data;
1729 struct d40_chan *d40c =
1730 container_of(chan, struct d40_chan, chan);
1734 err = d40_validate_conf(d40c, info);
1736 d40c->dma_cfg = *info;
1738 err = d40_config_memcpy(d40c);
1741 d40c->configured = true;
1745 EXPORT_SYMBOL(stedma40_filter);
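/*
 * Illustrative client-side sketch (placeholder values): a slave driver
 * typically fills a struct stedma40_chan_cfg and passes stedma40_filter()
 * to dma_request_channel() so that only a DMA40 channel matching that
 * configuration is handed out:
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.dir		= STEDMA40_PERIPH_TO_MEM,
 *		.src_dev_type	= <client event line>,
 *		.dst_dev_type	= STEDMA40_DEV_DST_MEMORY,
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */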
1747 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
1749 bool realtime = d40c->dma_cfg.realtime;
1750 bool highprio = d40c->dma_cfg.high_priority;
1751 u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
1752 u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
1753 u32 event = D40_TYPE_TO_EVENT(dev_type);
1754 u32 group = D40_TYPE_TO_GROUP(dev_type);
1755 u32 bit = 1 << event;
1757 /* Destination event lines are stored in the upper halfword */
1761 writel(bit, d40c->base->virtbase + prioreg + group * 4);
1762 writel(bit, d40c->base->virtbase + rtreg + group * 4);
1765 static void d40_set_prio_realtime(struct d40_chan *d40c)
1767 if (d40c->base->rev < 3)
1770 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
1771 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1772 __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
1774 if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
1775 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1776 __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
1779 /* DMA ENGINE functions */
1780 static int d40_alloc_chan_resources(struct dma_chan *chan)
1783 unsigned long flags;
1784 struct d40_chan *d40c =
1785 container_of(chan, struct d40_chan, chan);
1787 spin_lock_irqsave(&d40c->lock, flags);
1789 d40c->completed = chan->cookie = 1;
1791 /* If no dma configuration is set use default configuration (memcpy) */
1792 if (!d40c->configured) {
1793 err = d40_config_memcpy(d40c);
1795 chan_err(d40c, "Failed to configure memcpy channel\n");
1799 is_free_phy = (d40c->phy_chan == NULL);
1801 err = d40_allocate_channel(d40c);
1803 chan_err(d40c, "Failed to allocate channel\n");
1807 /* Fill in basic CFG register values */
1808 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1809 &d40c->dst_def_cfg, chan_is_logical(d40c));
1811 d40_set_prio_realtime(d40c);
1813 if (chan_is_logical(d40c)) {
1814 d40_log_cfg(&d40c->dma_cfg,
1815 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1817 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1818 d40c->lcpa = d40c->base->lcpa_base +
1819 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1821 d40c->lcpa = d40c->base->lcpa_base +
1822 d40c->dma_cfg.dst_dev_type *
1823 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1827 * Only write channel configuration to the DMA if the physical
1828 * resource is free. In case of multiple logical channels
1829 * on the same physical resource, only the first write is necessary.
1832 d40_config_write(d40c);
1834 spin_unlock_irqrestore(&d40c->lock, flags);
1838 static void d40_free_chan_resources(struct dma_chan *chan)
1840 struct d40_chan *d40c =
1841 container_of(chan, struct d40_chan, chan);
1843 unsigned long flags;
1845 if (d40c->phy_chan == NULL) {
1846 chan_err(d40c, "Cannot free unallocated channel\n");
1851 spin_lock_irqsave(&d40c->lock, flags);
1853 err = d40_free_dma(d40c);
1856 chan_err(d40c, "Failed to free channel\n");
1857 spin_unlock_irqrestore(&d40c->lock, flags);
1860 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1864 unsigned long dma_flags)
1866 struct scatterlist dst_sg;
1867 struct scatterlist src_sg;
1869 sg_init_table(&dst_sg, 1);
1870 sg_init_table(&src_sg, 1);
1872 sg_dma_address(&dst_sg) = dst;
1873 sg_dma_address(&src_sg) = src;
1875 sg_dma_len(&dst_sg) = size;
1876 sg_dma_len(&src_sg) = size;
1878 return stedma40_memcpy_sg(chan, &dst_sg, &src_sg, 1, dma_flags);
1881 static struct dma_async_tx_descriptor *
1882 d40_prep_sg(struct dma_chan *chan,
1883 struct scatterlist *dst_sg, unsigned int dst_nents,
1884 struct scatterlist *src_sg, unsigned int src_nents,
1885 unsigned long dma_flags)
1887 if (dst_nents != src_nents)
1890 return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
1893 static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1894 struct d40_chan *d40c,
1895 struct scatterlist *sgl,
1896 unsigned int sg_len,
1897 enum dma_data_direction direction,
1898 unsigned long dma_flags)
1900 dma_addr_t dev_addr = 0;
1903 d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
1904 d40c->dma_cfg.src_info.data_width,
1905 d40c->dma_cfg.dst_info.data_width);
1906 if (d40d->lli_len < 0) {
1907 chan_err(d40c, "Unaligned size\n");
1911 if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) {
1912 chan_err(d40c, "Out of memory\n");
1916 d40d->lli_current = 0;
1918 if (direction == DMA_FROM_DEVICE)
1919 if (d40c->runtime_addr)
1920 dev_addr = d40c->runtime_addr;
1922 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1923 else if (direction == DMA_TO_DEVICE)
1924 if (d40c->runtime_addr)
1925 dev_addr = d40c->runtime_addr;
1927 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1932 total_size = d40_log_sg_to_dev(sgl, sg_len,
1935 d40c->dma_cfg.src_info.data_width,
1936 d40c->dma_cfg.dst_info.data_width,
1946 static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1947 struct d40_chan *d40c,
1948 struct scatterlist *sgl,
1949 unsigned int sgl_len,
1950 enum dma_data_direction direction,
1951 unsigned long dma_flags)
1953 dma_addr_t src_dev_addr;
1954 dma_addr_t dst_dev_addr;
1957 d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
1958 d40c->dma_cfg.src_info.data_width,
1959 d40c->dma_cfg.dst_info.data_width);
1960 if (d40d->lli_len < 0) {
1961 chan_err(d40c, "Unaligned size\n");
1965 if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) {
1966 chan_err(d40c, "Out of memory\n");
1970 d40d->lli_current = 0;
1972 if (direction == DMA_FROM_DEVICE) {
1974 if (d40c->runtime_addr)
1975 src_dev_addr = d40c->runtime_addr;
1977 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1978 } else if (direction == DMA_TO_DEVICE) {
1979 if (d40c->runtime_addr)
1980 dst_dev_addr = d40c->runtime_addr;
1982 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1987 res = d40_phy_sg_to_lli(sgl,
1991 virt_to_phys(d40d->lli_phy.src),
1993 d40c->dma_cfg.src_info.data_width,
1994 d40c->dma_cfg.dst_info.data_width,
1995 d40c->dma_cfg.src_info.psize);
1999 res = d40_phy_sg_to_lli(sgl,
2003 virt_to_phys(d40d->lli_phy.dst),
2005 d40c->dma_cfg.dst_info.data_width,
2006 d40c->dma_cfg.src_info.data_width,
2007 d40c->dma_cfg.dst_info.psize);
2011 dma_sync_single_for_device(d40c->base->dev, d40d->lli_pool.dma_addr,
2012 d40d->lli_pool.size, DMA_TO_DEVICE);
2016 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2017 struct scatterlist *sgl,
2018 unsigned int sg_len,
2019 enum dma_data_direction direction,
2020 unsigned long dma_flags)
2022 struct d40_desc *d40d;
2023 struct d40_chan *d40c = container_of(chan, struct d40_chan,
2025 unsigned long flags;
2028 if (d40c->phy_chan == NULL) {
2029 chan_err(d40c, "Cannot prepare unallocated channel\n");
2030 return ERR_PTR(-EINVAL);
2033 spin_lock_irqsave(&d40c->lock, flags);
2034 d40d = d40_desc_get(d40c);
2039 if (chan_is_logical(d40c))
2040 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
2041 direction, dma_flags);
2043 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
2044 direction, dma_flags);
2046 chan_err(d40c, "Failed to prepare %s slave sg job: %d\n",
2047 chan_is_logical(d40c) ? "log" : "phy", err);
2051 d40d->txd.flags = dma_flags;
2053 dma_async_tx_descriptor_init(&d40d->txd, chan);
2055 d40d->txd.tx_submit = d40_tx_submit;
2057 spin_unlock_irqrestore(&d40c->lock, flags);
2062 d40_desc_free(d40c, d40d);
2063 spin_unlock_irqrestore(&d40c->lock, flags);
2067 static enum dma_status d40_tx_status(struct dma_chan *chan,
2068 dma_cookie_t cookie,
2069 struct dma_tx_state *txstate)
2071 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2072 dma_cookie_t last_used;
2073 dma_cookie_t last_complete;
2076 if (d40c->phy_chan == NULL) {
2077 chan_err(d40c, "Cannot read status of unallocated channel\n");
2081 last_complete = d40c->completed;
2082 last_used = chan->cookie;
2084 if (d40_is_paused(d40c))
2087 ret = dma_async_is_complete(cookie, last_complete, last_used);
2089 dma_set_tx_state(txstate, last_complete, last_used,
2090 stedma40_residue(chan));
2095 static void d40_issue_pending(struct dma_chan *chan)
2097 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2098 unsigned long flags;
2100 if (d40c->phy_chan == NULL) {
2101 chan_err(d40c, "Channel is not allocated!\n");
2105 spin_lock_irqsave(&d40c->lock, flags);
2107 /* Busy means that pending jobs are already being processed */
2109 (void) d40_queue_start(d40c);
2111 spin_unlock_irqrestore(&d40c->lock, flags);
2114 /* Runtime reconfiguration extension */
2115 static void d40_set_runtime_config(struct dma_chan *chan,
2116 struct dma_slave_config *config)
2118 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2119 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2120 enum dma_slave_buswidth config_addr_width;
2121 dma_addr_t config_addr;
2122 u32 config_maxburst;
2123 enum stedma40_periph_data_width addr_width;
2126 if (config->direction == DMA_FROM_DEVICE) {
2127 dma_addr_t dev_addr_rx =
2128 d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2130 config_addr = config->src_addr;
2132 dev_dbg(d40c->base->dev,
2133 "channel has a pre-wired RX address %08x "
2134 "overriding with %08x\n",
2135 dev_addr_rx, config_addr);
2136 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2137 dev_dbg(d40c->base->dev,
2138 "channel was not configured for peripheral "
2139 "to memory transfer (%d) overriding\n",
2141 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2143 config_addr_width = config->src_addr_width;
2144 config_maxburst = config->src_maxburst;
2146 } else if (config->direction == DMA_TO_DEVICE) {
2147 dma_addr_t dev_addr_tx =
2148 d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2150 config_addr = config->dst_addr;
2152 dev_dbg(d40c->base->dev,
2153 "channel has a pre-wired TX address %08x "
2154 "overriding with %08x\n",
2155 dev_addr_tx, config_addr);
2156 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2157 dev_dbg(d40c->base->dev,
2158 "channel was not configured for memory "
2159 "to peripheral transfer (%d) overriding\n",
2161 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2163 config_addr_width = config->dst_addr_width;
2164 config_maxburst = config->dst_maxburst;
2167 dev_err(d40c->base->dev,
2168 "unrecognized channel direction %d\n",
2173 switch (config_addr_width) {
2174 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2175 addr_width = STEDMA40_BYTE_WIDTH;
2177 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2178 addr_width = STEDMA40_HALFWORD_WIDTH;
2180 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2181 addr_width = STEDMA40_WORD_WIDTH;
2183 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2184 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2187 dev_err(d40c->base->dev,
2188 "illegal peripheral address width "
2190 config->src_addr_width);
2194 if (chan_is_logical(d40c)) {
2195 if (config_maxburst >= 16)
2196 psize = STEDMA40_PSIZE_LOG_16;
2197 else if (config_maxburst >= 8)
2198 psize = STEDMA40_PSIZE_LOG_8;
2199 else if (config_maxburst >= 4)
2200 psize = STEDMA40_PSIZE_LOG_4;
2202 psize = STEDMA40_PSIZE_LOG_1;
2204 if (config_maxburst >= 16)
2205 psize = STEDMA40_PSIZE_PHY_16;
2206 else if (config_maxburst >= 8)
2207 psize = STEDMA40_PSIZE_PHY_8;
2208 else if (config_maxburst >= 4)
2209 psize = STEDMA40_PSIZE_PHY_4;
2210 else if (config_maxburst >= 2)
2211 psize = STEDMA40_PSIZE_PHY_2;
2213 psize = STEDMA40_PSIZE_PHY_1;
2216 /* Set up all the endpoint configs */
2217 cfg->src_info.data_width = addr_width;
2218 cfg->src_info.psize = psize;
2219 cfg->src_info.big_endian = false;
2220 cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2221 cfg->dst_info.data_width = addr_width;
2222 cfg->dst_info.psize = psize;
2223 cfg->dst_info.big_endian = false;
2224 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2226 /* Fill in register values */
2227 if (chan_is_logical(d40c))
2228 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2230 d40_phy_cfg(cfg, &d40c->src_def_cfg,
2231 &d40c->dst_def_cfg, false);
2233 /* These settings will take precedence later */
2234 d40c->runtime_addr = config_addr;
2235 d40c->runtime_direction = config->direction;
2236 dev_dbg(d40c->base->dev,
2237 "configured channel %s for %s, data width %d, "
2238 "maxburst %d bytes, LE, no flow control\n",
2239 dma_chan_name(chan),
2240 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
2245 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2248 unsigned long flags;
2249 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2251 if (d40c->phy_chan == NULL) {
2252 chan_err(d40c, "Channel is not allocated!\n");
2257 case DMA_TERMINATE_ALL:
2258 spin_lock_irqsave(&d40c->lock, flags);
2260 spin_unlock_irqrestore(&d40c->lock, flags);
2263 return d40_pause(chan);
2265 return d40_resume(chan);
2266 case DMA_SLAVE_CONFIG:
2267 d40_set_runtime_config(chan,
2268 (struct dma_slave_config *) arg);
2274 /* Other commands are unimplemented */
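/*
 * Illustrative client-side sketch (placeholder values): runtime
 * reconfiguration goes through the standard dmaengine control hook and ends
 * up in d40_set_runtime_config() above:
 *
 *	struct dma_slave_config config = {
 *		.direction	= DMA_FROM_DEVICE,
 *		.src_addr	= <peripheral FIFO address>,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 8,
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long) &config);
 */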
2278 /* Initialization functions */
2280 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2281 struct d40_chan *chans, int offset,
2285 struct d40_chan *d40c;
2287 INIT_LIST_HEAD(&dma->channels);
2289 for (i = offset; i < offset + num_chans; i++) {
2292 d40c->chan.device = dma;
2294 spin_lock_init(&d40c->lock);
2296 d40c->log_num = D40_PHY_CHAN;
2298 INIT_LIST_HEAD(&d40c->active);
2299 INIT_LIST_HEAD(&d40c->queue);
2300 INIT_LIST_HEAD(&d40c->client);
2302 tasklet_init(&d40c->tasklet, dma_tasklet,
2303 (unsigned long) d40c);
2305 list_add_tail(&d40c->chan.device_node,
2310 static int __init d40_dmaengine_init(struct d40_base *base,
2311 int num_reserved_chans)
2315 d40_chan_init(base, &base->dma_slave, base->log_chans,
2316 0, base->num_log_chans);
2318 dma_cap_zero(base->dma_slave.cap_mask);
2319 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2321 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2322 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2323 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2324 base->dma_slave.device_prep_dma_sg = d40_prep_sg;
2325 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2326 base->dma_slave.device_tx_status = d40_tx_status;
2327 base->dma_slave.device_issue_pending = d40_issue_pending;
2328 base->dma_slave.device_control = d40_control;
2329 base->dma_slave.dev = base->dev;
2331 err = dma_async_device_register(&base->dma_slave);
2334 d40_err(base->dev, "Failed to register slave channels\n");
2338 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2339 base->num_log_chans, base->plat_data->memcpy_len);
2341 dma_cap_zero(base->dma_memcpy.cap_mask);
2342 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2343 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2345 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2346 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2347 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2348 base->dma_memcpy.device_prep_dma_sg = d40_prep_sg;
2349 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2350 base->dma_memcpy.device_tx_status = d40_tx_status;
2351 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2352 base->dma_memcpy.device_control = d40_control;
2353 base->dma_memcpy.dev = base->dev;
2355 * This controller can only access addresses at even
2356 * 32bit boundaries, i.e. 2^2 = 4 byte aligned.
2358 base->dma_memcpy.copy_align = 2;
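/* copy_align is a power-of-two shift: memcpy clients are expected to supply 4-byte (1 << 2) aligned buffers. */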
2360 err = dma_async_device_register(&base->dma_memcpy);
2364 "Failed to regsiter memcpy only channels\n");
2368 d40_chan_init(base, &base->dma_both, base->phy_chans,
2369 0, num_reserved_chans);
2371 dma_cap_zero(base->dma_both.cap_mask);
2372 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2373 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2374 dma_cap_set(DMA_SG, base->dma_both.cap_mask);
2376 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2377 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2378 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2379 base->dma_both.device_prep_dma_sg = d40_prep_sg;
2380 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2381 base->dma_both.device_tx_status = d40_tx_status;
2382 base->dma_both.device_issue_pending = d40_issue_pending;
2383 base->dma_both.device_control = d40_control;
2384 base->dma_both.dev = base->dev;
2385 base->dma_both.copy_align = 2;
2386 err = dma_async_device_register(&base->dma_both);
2390 "Failed to register logical and physical capable channels\n");
2395 dma_async_device_unregister(&base->dma_memcpy);
2397 dma_async_device_unregister(&base->dma_slave);
2402 /* Initialization functions. */
2404 static int __init d40_phy_res_init(struct d40_base *base)
2407 int num_phy_chans_avail = 0;
2409 int odd_even_bit = -2;
2411 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2412 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2414 for (i = 0; i < base->num_phy_chans; i++) {
2415 base->phy_res[i].num = i;
2416 odd_even_bit += 2 * ((i % 2) == 0);
2417 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2418 /* Mark security only channels as occupied */
2419 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2420 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2422 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2423 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2424 num_phy_chans_avail++;
2426 spin_lock_init(&base->phy_res[i].lock);
2429 /* Mark disabled channels as occupied */
2430 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2431 int chan = base->plat_data->disabled_channels[i];
2433 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
2434 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
2435 num_phy_chans_avail--;
2438 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2439 num_phy_chans_avail, base->num_phy_chans);
2441 /* Verify settings extended vs standard */
2442 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2444 for (i = 0; i < base->num_phy_chans; i++) {
2446 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2447 (val[0] & 0x3) != 1)
2449 "[%s] INFO: channel %d is misconfigured (%d)\n",
2450 __func__, i, val[0] & 0x3);
2452 val[0] = val[0] >> 2;
2455 return num_phy_chans_avail;
2458 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
	static const struct d40_reg_val dma_id_regs[] = {
		/* Peripheral Id */
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 depends on HW revision:
		 * DB8500ed has 0x0008,
		 * DB8500v1 has 0x0028,
		 * DB8500v2 has 0x0038.
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
		/* PCell Id */
		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;
	u32 val;
	u32 rev;
	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		d40_err(&pdev->dev, "No matching clock found\n");
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;
	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			d40_err(&pdev->dev,
				"Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}
	/* Get silicon revision and designer */
	val = readl(virtbase + D40_DREG_PERIPHID2);

	if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
	    D40_HW_DESIGNER) {
		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
			val & D40_DREG_PERIPHID2_DESIGNER_MASK,
			D40_HW_DESIGNER);
		goto failure;
	}

	rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
		D40_DREG_PERIPHID2_REV_POS;
	/*
	 * The number of physical channels on this HW: ICFG[2:0] encodes
	 * the count in steps of four, giving 4 to 32 channels.
	 */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 rev, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;
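	/*
	 * A single allocation holds struct d40_base followed by the
	 * d40_chan array for all physical, logical and memcpy channels;
	 * phy_chans is pointed just past the (aligned) base struct below.
	 */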
	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);
	if (base == NULL) {
		d40_err(&pdev->dev, "Out of memory\n");
		goto failure;
	}
	base->rev = rev;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];
	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels is the number of
		 * event lines for all src devices and dst devices.
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}
	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
					    sizeof(struct d40_desc *) *
					    D40_LCLA_LINK_PER_EVENT_GRP,
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (!IS_ERR(clk)) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));
	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}
	return NULL;
}
static void __init d40_hw_init(struct d40_base *base)
{
	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC, .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;
	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
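	/*
	 * The per-channel two-bit fields are accumulated into even/odd
	 * register pairs by walking the channels from the highest number
	 * down, shifting two bits in per step. Channels reserved for
	 * secure use keep their ACTIVE field set (0x3) and are skipped
	 * for interrupt enable and physical-mode setup.
	 */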
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}
	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}
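/*
 * Allocate the LCLA (logical channel link address) area: 1 KiB per
 * physical channel, which the hardware requires to be 256 KiB aligned.
 */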
static int __init d40_lcla_allocate(struct d40_base *base)
{
	struct d40_lcla_pool *pool = &base->lcla_pool;
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18-bit
	 * (256 KiB) aligned. To fulfill this hardware requirement without
	 * wasting 256 KiB, we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculate how many pages are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
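	/*
	 * Note that lcla_pool.pages is passed below as the order argument
	 * to __get_free_pages(). E.g. with eight physical channels and
	 * 4 KiB pages this is 8192 / 4096 = 2, i.e. each attempt grabs a
	 * 16 KiB (four page) block.
	 */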
	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	/* Free all blocks tried before the aligned one */
	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);
	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no success with finding the
		 * correct alignment, try allocating a big buffer instead.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}
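	/*
	 * The fallback above over-allocates by LCLA_ALIGNMENT bytes so an
	 * aligned block is guaranteed to fit inside the buffer. The pool
	 * is mapped once up front; the DMAC fetches its LCLA link entries
	 * through this mapping.
	 */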
	pool->dma_addr = dma_map_single(base->dev, pool->base,
					SZ_1K * base->num_phy_chans,
					DMA_TO_DEVICE);
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
		pool->dma_addr = 0;
		ret = -ENOMEM;
		goto failure;
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);

failure:
	kfree(page_list);
	return ret;
}
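/*
 * Probe: detect and map the hardware, set up the LCPA and LCLA areas,
 * request the interrupt and register the dmaengine devices.
 */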
static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);
	if (base == NULL)
		goto failure;
	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);
	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		d40_err(&pdev->dev,
			"Failed to request LCPA region 0x%x-0x%x\n",
			res->start, res->end);
		goto failure;
	}
	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);
	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
		goto failure;
	}

	ret = d40_lcla_allocate(base);
	if (ret) {
		d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
		goto failure;
	}
	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "Failed to request IRQ\n");
		goto failure;
	}

	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);

		if (base->lcla_pool.dma_addr)
			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
					 SZ_1K * base->num_phy_chans,
					 DMA_TO_DEVICE);

		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}
static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};
static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
arch_initcall(stedma40_init);