/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <mach/clk.h>

#include "dmaengine.h"
#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

#define TEGRA_APBDMA_CHAN_CSRE			0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		BIT(31)
/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/*
 * If any burst is in flight and DMA is paused then this is the time (in
 * microseconds) to complete the in-flight burst and update the DMA status
 * register.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

/* DMA channel register space size */
#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE	0x20
/*
 * tegra_dma_chip_data: Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 * @support_channel_pause: Support channel wise pause of dma.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int max_dma_count;
	bool support_channel_pause;
};
/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long	csr;
	unsigned long	ahb_ptr;
	unsigned long	apb_ptr;
	unsigned long	ahb_seq;
	unsigned long	apb_seq;
};
/*
 * tegra_dma_sg_req: DMA request details to configure the hardware. This
 * contains the details for one transfer to configure the DMA hw.
 * The client's request for data transfer can be broken into multiple
 * sub-transfers as per requester details and hw support.
 * Each sub-transfer gets added to the list of transfers and points to the
 * Tegra DMA descriptor which manages the transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs	ch_regs;
	int				req_len;
	bool				configured;
	bool				last_sg;
	bool				half_done;
	struct list_head		node;
	struct tegra_dma_desc		*dma_desc;
};
/*
 * tegra_dma_desc: Tegra DMA descriptor which manages the client requests.
 * This descriptor keeps track of transfer status, callbacks and request
 * counts etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor	txd;
	int				bytes_requested;
	int				bytes_transferred;
	enum dma_status			dma_status;
	struct list_head		node;
	struct list_head		tx_list;
	struct list_head		cb_node;
	int				cb_count;
};
struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan		dma_chan;
	char			name[30];
	bool			config_init;
	int			id;
	int			irq;
	unsigned long		chan_base_offset;
	spinlock_t		lock;
	bool			busy;
	bool			cyclic;
	struct tegra_dma	*tdma;
	/* Different lists for managing the requests */
	struct list_head	free_sg_req;
	struct list_head	pending_sg_req;
	struct list_head	free_dma_desc;
	struct list_head	cb_desc;

	/* ISR handler and tasklet for bottom half of isr handling */
	dma_isr_handler		isr_handler;
	struct tasklet_struct	tasklet;
	dma_async_tx_callback	callback;
	void			*callback_param;

	/* Channel-slave specific configuration */
	struct dma_slave_config dma_sconfig;
};
/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device		dma_dev;
	struct device			*dev;
	struct clk			*dma_clk;
	spinlock_t			global_lock;
	void __iomem			*base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/* Some registers need to be cached before suspend */
	u32				reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[0];
};
static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
		u32 reg, u32 val)
{
	writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);
/* Get DMA desc from free list, if not there then allocate it. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not allocate if desc are waiting for ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd)) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
		return NULL;
	}

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
	return dma_desc;
}
static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
		struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}
static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
	if (!sg_req)
		dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
	return sg_req;
}
static int tegra_dma_slave_config(struct dma_chan *dc,
		struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	tdc->config_init = true;
	return 0;
}
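
/*
 * Illustrative client-side usage (a sketch, not part of this driver):
 * a slave channel is normally configured through the generic dmaengine
 * API before any transfer is prepared, e.g.:
 *
 *	struct dma_slave_config sconf = {
 *		.dst_addr	= <APB FIFO physical address>,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *		.slave_id	= <APB requester select>,
 *	};
 *	dmaengine_slave_config(chan, &sconf);
 *
 * dmaengine_slave_config() reaches this function through
 * tegra_dma_device_control() with the DMA_SLAVE_CONFIG command.
 */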
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
	if (wait_for_burst_complete)
		udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	spin_unlock(&tdma->global_lock);
}
static void tegra_dma_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
				TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	else
		tegra_dma_global_resume(tdc);
}
static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}
static void tegra_dma_start(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
			ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}
static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no IEC status then this makes sure that the last
	 * burst has not completed. There may be a case where the last burst
	 * is in flight and so it can complete, but because DMA is paused it
	 * will neither generate an interrupt nor reload the new
	 * configuration.
	 * If there is already IEC status then the interrupt handler needs
	 * to load the new configuration.
	 */
	tegra_dma_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If interrupt is pending then do nothing as the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
			nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;

	tegra_dma_resume(tdc);
}
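
/*
 * Timing note (illustrative): with requests A and B queued, A is started
 * by tegra_dma_start() and B is pre-programmed here while the channel is
 * paused. When A's last burst completes, the hardware reloads B's
 * configuration without software intervention, which is what keeps
 * continuous transfers gap-free.
 */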
static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
					typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}
static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}
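
/*
 * Worked example (illustrative numbers only): WCOUNT is programmed as
 * req_len - 4 and the STATUS count field counts down in the same
 * encoding. For a 64-byte request whose status count field reads 28,
 * the bytes already transferred are 64 - 28 - 4 = 32.
 */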
static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add in cb list if it is not there. */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
						&tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}
static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "DMA is running without any request\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head req on the list is in flight.
	 * If it is not in flight then abort the transfer as
	 * the looping of transfers cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);
	return true;
}
static void handle_once_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_SUCCESS;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
}
static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	/* Callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	/* If not last req then put at end of pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}
static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		callback = dma_desc->txd.callback;
		callback_param = dma_desc->txd.callback_param;
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count-- && callback)
			callback(callback_param);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}
static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		"Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}
static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return cookie;
}
static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait for 1 burst time before configuring DMA for
			 * the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
	return;
}
static void tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return;
	}

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, status);
	}
	tegra_dma_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}
static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	spin_lock_irqsave(&tdc->lock, flags);

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return ret;
	}

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d is not found\n", cookie);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return ret;
}
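
/*
 * Residue sketch (illustrative numbers): for a cyclic transfer with
 * bytes_requested = 4096 and bytes_transferred accumulated to 5120,
 * the modulo above folds the progress back into the buffer, so the
 * reported residue is 4096 - (5120 % 4096) = 3072 bytes.
 */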
static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return tegra_dma_slave_config(dc,
				(struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		tegra_dma_terminate_all(dc);
		return 0;

	default:
		break;
	}

	return -ENXIO;
}
static inline int get_bus_width(struct tegra_dma_channel *tdc,
		enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			"slave bus width is not supported, using 32 bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}
static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * burst_size from the client is in terms of the bus width.
	 * Convert it into AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}
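
/*
 * Worked example (illustrative numbers only): a client requesting
 * maxburst = 8 words on a 16-bit slave gives burst_byte = 8 * 2 = 16
 * bytes, i.e. burst_ahb_width = 4 AHB words, which selects
 * TEGRA_APBDMA_AHBSEQ_BURST_4 above.
 */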
static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}
	return -EINVAL;
}
static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
				(len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"Dma length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the requested mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * Additional requests can be queued as long as the DMA has not been
	 * started; the driver will loop over all queued requests.
	 * Once the DMA has started, new requests can be queued only after
	 * terminating the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
			(len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split transfer equal to period size */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->half_done = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	dma_desc->txd.flags = 0;

	/*
	 * Make sure that the requested mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
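
/*
 * Illustrative client-side usage of the cyclic API (a sketch, not part
 * of this driver): an audio driver would typically map a ring buffer
 * and prepare a single cyclic descriptor for it, e.g.:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, 4 * 4096, 4096,
 *					 DMA_MEM_TO_DEV, 0);
 *	desc->callback = period_elapsed_cb;	// hypothetical callback
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * Each 4096-byte period then raises one EOC interrupt, which the
 * tasklet turns into a callback invocation.
 */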
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0)
		dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
	return ret;
}
static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;

	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;
	unsigned long flags;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	if (tdc->busy)
		tegra_dma_terminate_all(dc);

	spin_lock_irqsave(&tdc->lock, flags);
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	spin_unlock_irqrestore(&tdc->lock, flags);

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	clk_disable_unprepare(tdma->dma_clk);
}
/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
};

#if defined(CONFIG_OF)
/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels		= 32,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra114-apbdma",
		.data = &tegra114_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
#endif
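
/*
 * Illustrative device tree node (a sketch; the binding document is
 * authoritative, and the interrupt list is truncated here):
 *
 *	apbdma: dma@6000a000 {
 *		compatible = "nvidia,tegra20-apbdma";
 *		reg = <0x6000a000 0x1200>;
 *		interrupts = <0 104 0x04>, <0 105 0x04>, ...;
 *	};
 *
 * The compatible string selects the matching tegra_dma_chip_data entry
 * above.
 */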
static int tegra_dma_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	const struct tegra_dma_chip_data *cdata = NULL;

	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_device(of_match_ptr(tegra_dma_of_match),
					&pdev->dev);
		if (!match) {
			dev_err(&pdev->dev, "Error: No device match found\n");
			return -ENODEV;
		}
		cdata = match->data;
	} else {
		/* If no device tree then fallback to tegra20 */
		cdata = &tegra20_dma_chip_data;
	}

	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			sizeof(struct tegra_dma_channel), GFP_KERNEL);
	if (!tdma) {
		dev_err(&pdev->dev, "Error: memory allocation failed\n");
		return -ENOMEM;
	}

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "No mem resource for DMA\n");
		return -EINVAL;
	}

	tdma->base_addr = devm_request_and_ioremap(&pdev->dev, res);
	if (!tdma->base_addr) {
		dev_err(&pdev->dev,
			"Cannot request memregion/iomap dma address\n");
		return -EADDRNOTAVAIL;
	}

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = tegra_dma_runtime_resume(&pdev->dev);
		if (ret) {
			dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
				ret);
			goto err_pm_disable;
		}
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
		goto err_pm_disable;
	}

	/* Reset DMA controller */
	tegra_periph_reset_assert(tdma->dma_clk);
	udelay(2);
	tegra_periph_reset_deassert(tdma->dma_clk);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	clk_disable_unprepare(tdma->dma_clk);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
					i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = devm_request_irq(&pdev->dev, tdc->irq,
				tegra_dma_isr, 0, tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
				&tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
				(unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.device_control = tegra_dma_device_control;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
			cdata->nr_channels);
	return 0;

err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);
	return ret;
}
static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}
static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	clk_disable_unprepare(tdma->dma_clk);
	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}
static const struct dev_pm_ops tegra_dma_dev_pm_ops __devinitconst = {
#ifdef CONFIG_PM_RUNTIME
	.runtime_suspend = tegra_dma_runtime_suspend,
	.runtime_resume = tegra_dma_runtime_resume,
#endif
};

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.owner	= THIS_MODULE,
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = of_match_ptr(tegra_dma_of_match),
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);
MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");