/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/clk/tegra.h>	/* tegra_periph_reset_{assert,deassert}() */

#include "dmaengine.h"
#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC
/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/*
 * If any burst is in flight when the DMA is paused, this is the time (in
 * microseconds) needed for the in-flight burst to complete and for the DMA
 * status register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

/* DMA channel register space size */
#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE	0x20
/*
 * tegra_dma_chip_data: Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int max_dma_count;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long	csr;
	unsigned long	ahb_ptr;
	unsigned long	apb_ptr;
	unsigned long	ahb_seq;
	unsigned long	apb_seq;
};
/*
 * tegra_dma_sg_req: DMA request details needed to configure the hardware for
 * one sub-transfer. A client's data transfer request can be broken into
 * multiple sub-transfers, as dictated by the requester details and hardware
 * support. Each sub-transfer is added to the list of transfers and points to
 * the Tegra DMA descriptor that manages the overall transfer.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs	ch_regs;
	int				req_len;
	bool				configured;
	bool				last_sg;
	bool				half_done;
	struct list_head		node;
	struct tegra_dma_desc		*dma_desc;
};
/*
 * tegra_dma_desc: Tegra DMA descriptor which manages the client requests.
 * This descriptor keeps track of transfer status, callbacks and request
 * counts.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor	txd;
	int				bytes_requested;
	int				bytes_transferred;
	enum dma_status			dma_status;
	struct list_head		node;
	struct list_head		tx_list;
	struct list_head		cb_node;
	int				cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);
/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan		dma_chan;
	char			name[30];
	bool			config_init;
	int			id;
	int			irq;
	unsigned long		chan_base_offset;
	spinlock_t		lock;
	bool			busy;
	struct tegra_dma	*tdma;
	bool			cyclic;

	/* Different lists for managing the requests */
	struct list_head	free_sg_req;
	struct list_head	pending_sg_req;
	struct list_head	free_dma_desc;
	struct list_head	cb_desc;

	/* ISR handler and tasklet for bottom half of isr handling */
	dma_isr_handler		isr_handler;
	struct tasklet_struct	tasklet;
	dma_async_tx_callback	callback;
	void			*callback_param;

	/* Channel-slave specific configuration */
	struct dma_slave_config dma_sconfig;
};
/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device		dma_dev;
	struct device			*dev;
	struct clk			*dma_clk;
	spinlock_t			global_lock;
	void __iomem			*base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/* Some registers need to be cached before suspend. */
	u32				reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[0];
};
static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
		u32 reg, u32 val)
{
	writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}
static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);
/* Get DMA desc from free list, if not there then allocate it. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not allocate if desc are waiting for ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd)) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
		return NULL;
	}

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
	return dma_desc;
}
static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
		struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}
static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
	if (!sg_req)
		dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
	return sg_req;
}
static int tegra_dma_slave_config(struct dma_chan *dc,
		struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	tdc->config_init = true;
	return 0;
}
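
/*
 * Illustrative only, not part of this driver: a minimal sketch of how a
 * hypothetical client would use the generic dmaengine API to configure a
 * channel before preparing transfers. The FIFO address, bus width, burst
 * count and slave_id below are made-up placeholder values.
 *
 *	struct dma_slave_config sconf = {
 *		.src_addr	= 0x70000000,	(assumed device FIFO address)
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *		.slave_id	= 2,		(assumed APB requester select)
 *	};
 *	int err = dmaengine_slave_config(chan, &sconf);
 *
 * dmaengine_slave_config() reaches tegra_dma_slave_config() above through
 * tegra_dma_device_control(); it must be called while no request is pending
 * on the channel, otherwise -EBUSY is returned.
 */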
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
	if (wait_for_burst_complete)
		udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	spin_unlock(&tdma->global_lock);
}
static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}
static void tegra_dma_start(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}
static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer once the last burst of the current transfer completes.
	 * If no EOC (IEC) status is set, the last burst has not yet
	 * completed. A burst may still be in flight: it can complete while
	 * the DMA is paused, but then it neither raises the interrupt nor
	 * reloads the new configuration.
	 * If the EOC status is already set, the interrupt handler must load
	 * the new configuration instead.
	 */
	tegra_dma_global_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If interrupt is pending then do nothing as the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_global_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;

	tegra_dma_global_resume(tdc);
}
static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
					typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	tdc->busy = true;
}
static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}
static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}
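
/*
 * A worked example of the arithmetic above, under the assumption (from the
 * programming model used in this driver, where the word count is written as
 * (req_len - 4)) that the STATUS count field holds the bytes still
 * outstanding: with req_len = 64 and
 * (status & TEGRA_APBDMA_STATUS_COUNT_MASK) = 28, the bytes already
 * transferred come out as 64 - 28 - 4 = 32.
 */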
static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
						typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add in cb list if it is not there. */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
							&tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}
static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Dma is running without req\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * The head request on the pending list must be in flight. If it is
	 * not, abort the transfer, as the transfer loop cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);
	return true;
}
static void handle_once_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_SUCCESS;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
}
static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
		bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	/* Callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	/* If not last req then put at end of pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}
static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		callback = dma_desc->txd.callback;
		callback_param = dma_desc->txd.callback_param;
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count-- && callback)
			callback(callback_param);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}
static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		"Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}
static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return cookie;
}
static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait one burst time so the DMA can be configured
			 * for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
}
static void tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return;
	}

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_global_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, status);
	}
	tegra_dma_global_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}
static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	spin_lock_irqsave(&tdc->lock, flags);

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		dma_set_residue(txstate, 0);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return ret;
	}

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d is not found\n", cookie);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return ret;
}
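
/*
 * Illustrative only, not part of this driver: a sketch of how a client
 * would poll a submitted transfer through the generic dmaengine helpers.
 * "chan" and "cookie" are assumed to come from the client's earlier
 * prepare/submit calls.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_info("in progress, residue %u bytes\n", state.residue);
 *
 * dmaengine_tx_status() lands in tegra_dma_tx_status() above, which reports
 * the residue as bytes_requested minus bytes_transferred modulo
 * bytes_requested, looked up in the wait-for-ack and pending lists.
 */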
static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
			unsigned long arg)
{
	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return tegra_dma_slave_config(dc,
				(struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		tegra_dma_terminate_all(dc);
		return 0;

	default:
		break;
	}

	return -ENXIO;
}
static inline int get_bus_width(struct tegra_dma_channel *tdc,
		enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			"slave bus width is not supported, using 32 bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}
static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * burst_size from the client is in terms of the bus width.
	 * Convert it into AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}
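
/*
 * Worked examples of the burst selection above: a client that sets
 * maxburst = 8 on a 4-byte-wide slave gives burst_byte = 32 and
 * burst_ahb_width = 8, selecting TEGRA_APBDMA_AHBSEQ_BURST_8. With
 * maxburst = 0 the length decides: any len that is not a multiple of 16
 * (len & 0xF) falls back to BURST_1, len = 0x90 (bit 4 set) selects
 * BURST_4, and len = 0x100 selects BURST_8.
 */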
static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}
	return -EINVAL;
}
static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
				(len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"Dma length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the mode does not conflict with the currently
	 * configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
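
/*
 * Illustrative only, not part of this driver: a hedged sketch of a client
 * submitting a scatter-gather transfer through the generic dmaengine
 * wrappers. The channel ("chan"), the mapped sg list ("sgl", "nents") and
 * the callback names ("my_done_cb", "my_ctx") are assumed client-side
 * placeholders.
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!desc)
 *		return -EIO;
 *	desc->callback = my_done_cb;
 *	desc->callback_param = my_ctx;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * Each sg entry must be word aligned with a word-multiple length, as
 * enforced by the checks in tegra_dma_prep_slave_sg() above.
 */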
static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * We allow additional requests to be queued only until the DMA is
	 * started; the driver then loops over all queued requests. Once the
	 * DMA is started, new requests can be queued only after terminating
	 * the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfer when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
			(len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_FLOW;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split transfer equal to period size */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->half_done = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the mode does not conflict with the currently
	 * configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
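
/*
 * Illustrative only, not part of this driver: a hedged sketch of setting up
 * a cyclic transfer (e.g. audio) with the generic wrapper. "chan", the DMA
 * address "buf", "buf_len", "period_len" and the callback names are assumed
 * client-side values; buf_len must be a multiple of period_len, as checked
 * above.
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -EIO;
 *	desc->callback = my_period_cb;
 *	desc->callback_param = my_ctx;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * The callback then fires once per completed period until the client calls
 * dmaengine_terminate_all(chan).
 */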
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0)
		dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
	return ret;
}
static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;

	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;
	unsigned long flags;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	if (tdc->busy)
		tegra_dma_terminate_all(dc);

	spin_lock_irqsave(&tdc->lock, flags);
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	spin_unlock_irqrestore(&tdc->lock, flags);

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	clk_disable_unprepare(tdma->dma_clk);
}
/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.max_dma_count		= 1024UL * 64,
};

#if defined(CONFIG_OF)
/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.max_dma_count		= 1024UL * 64,
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
#endif
static int tegra_dma_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	const struct tegra_dma_chip_data *cdata = NULL;

	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_device(of_match_ptr(tegra_dma_of_match),
					&pdev->dev);
		if (!match) {
			dev_err(&pdev->dev, "Error: No device match found\n");
			return -ENODEV;
		}
		cdata = match->data;
	} else {
		/* If no device tree then fallback to tegra20 */
		cdata = &tegra20_dma_chip_data;
	}

	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			sizeof(struct tegra_dma_channel), GFP_KERNEL);
	if (!tdma) {
		dev_err(&pdev->dev, "Error: memory allocation failed\n");
		return -ENOMEM;
	}

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "No mem resource for DMA\n");
		return -EINVAL;
	}

	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = tegra_dma_runtime_resume(&pdev->dev);
		if (ret) {
			dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
				ret);
			goto err_pm_disable;
		}
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
		goto err_pm_disable;
	}

	/* Reset DMA controller */
	tegra_periph_reset_assert(tdma->dma_clk);
	udelay(2);
	tegra_periph_reset_deassert(tdma->dma_clk);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	clk_disable_unprepare(tdma->dma_clk);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
					i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = devm_request_irq(&pdev->dev, tdc->irq,
				tegra_dma_isr, 0, tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
				&tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
				(unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.device_control = tegra_dma_device_control;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
			cdata->nr_channels);
	return 0;

err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);
	return ret;
}
static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}
static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	clk_disable_unprepare(tdma->dma_clk);
	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
#ifdef CONFIG_PM_RUNTIME
	.runtime_suspend = tegra_dma_runtime_suspend,
	.runtime_resume = tegra_dma_runtime_resume,
#endif
};
static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.owner	= THIS_MODULE,
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = of_match_ptr(tegra_dma_of_match),
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");