2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
50 #include "rk_sdmmc_dbg.h"
51 #include <linux/regulator/rockchip_io_vol_domain.h>
52 #include "../../clk/rockchip/clk-ops.h"
54 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
56 /* Common flag combinations */
57 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
58 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
60 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
62 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
63 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
64 #define DW_MCI_SEND_STATUS 1
65 #define DW_MCI_RECV_STATUS 2
66 #define DW_MCI_DMA_THRESHOLD 16
68 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
69 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
71 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
72 #define SDMMC_DATA_TIMEOUT_SD 500
73 #define SDMMC_DATA_TIMEOUT_SDIO 250
74 #define SDMMC_DATA_TIMEOUT_EMMC 2500
76 #define SDMMC_CMD_RTO_MAX_HOLD 200
77 #define SDMMC_WAIT_FOR_UNBUSY 2500
79 #ifdef CONFIG_MMC_DW_IDMAC
80 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
81 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
82 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
86 u32 des0; /* Control Descriptor */
87 #define IDMAC_DES0_DIC BIT(1)
88 #define IDMAC_DES0_LD BIT(2)
89 #define IDMAC_DES0_FD BIT(3)
90 #define IDMAC_DES0_CH BIT(4)
91 #define IDMAC_DES0_ER BIT(5)
92 #define IDMAC_DES0_CES BIT(30)
93 #define IDMAC_DES0_OWN BIT(31)
95 u32 des1; /* Buffer sizes */
96 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
97 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
99 u32 des2; /* buffer 1 physical address */
101 u32 des3; /* buffer 2 physical address */
103 #endif /* CONFIG_MMC_DW_IDMAC */
105 static const u8 tuning_blk_pattern_4bit[] = {
106 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
107 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
108 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
109 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
110 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
111 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
112 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
113 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
116 static const u8 tuning_blk_pattern_8bit[] = {
117 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
118 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
119 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
120 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
121 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
122 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
123 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
124 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
125 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
126 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
127 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
128 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
129 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
130 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
131 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
132 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
135 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
136 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
137 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
138 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
140 /*printk the all register of current host*/
142 static int dw_mci_regs_printk(struct dw_mci *host)
144 struct sdmmc_reg *regs = dw_mci_regs;
146 while( regs->name != 0 ){
147 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
150 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
155 #if defined(CONFIG_DEBUG_FS)
156 static int dw_mci_req_show(struct seq_file *s, void *v)
158 struct dw_mci_slot *slot = s->private;
159 struct mmc_request *mrq;
160 struct mmc_command *cmd;
161 struct mmc_command *stop;
162 struct mmc_data *data;
164 /* Make sure we get a consistent snapshot */
165 spin_lock_bh(&slot->host->lock);
175 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
176 cmd->opcode, cmd->arg, cmd->flags,
177 cmd->resp[0], cmd->resp[1], cmd->resp[2],
178 cmd->resp[2], cmd->error);
180 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
181 data->bytes_xfered, data->blocks,
182 data->blksz, data->flags, data->error);
185 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
186 stop->opcode, stop->arg, stop->flags,
187 stop->resp[0], stop->resp[1], stop->resp[2],
188 stop->resp[2], stop->error);
191 spin_unlock_bh(&slot->host->lock);
196 static int dw_mci_req_open(struct inode *inode, struct file *file)
198 return single_open(file, dw_mci_req_show, inode->i_private);
201 static const struct file_operations dw_mci_req_fops = {
202 .owner = THIS_MODULE,
203 .open = dw_mci_req_open,
206 .release = single_release,
209 static int dw_mci_regs_show(struct seq_file *s, void *v)
211 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
212 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
213 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
214 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
215 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
216 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
221 static int dw_mci_regs_open(struct inode *inode, struct file *file)
223 return single_open(file, dw_mci_regs_show, inode->i_private);
226 static const struct file_operations dw_mci_regs_fops = {
227 .owner = THIS_MODULE,
228 .open = dw_mci_regs_open,
231 .release = single_release,
234 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
236 struct mmc_host *mmc = slot->mmc;
237 struct dw_mci *host = slot->host;
241 root = mmc->debugfs_root;
245 node = debugfs_create_file("regs", S_IRUSR, root, host,
250 node = debugfs_create_file("req", S_IRUSR, root, slot,
255 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
259 node = debugfs_create_x32("pending_events", S_IRUSR, root,
260 (u32 *)&host->pending_events);
264 node = debugfs_create_x32("completed_events", S_IRUSR, root,
265 (u32 *)&host->completed_events);
272 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
274 #endif /* defined(CONFIG_DEBUG_FS) */
276 static void dw_mci_set_timeout(struct dw_mci *host)
278 /* timeout (maximum) */
279 mci_writel(host, TMOUT, 0xffffffff);
282 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
284 struct mmc_data *data;
285 struct dw_mci_slot *slot = mmc_priv(mmc);
286 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
288 cmd->error = -EINPROGRESS;
292 if (cmdr == MMC_STOP_TRANSMISSION)
293 cmdr |= SDMMC_CMD_STOP;
295 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
297 if (cmd->flags & MMC_RSP_PRESENT) {
298 /* We expect a response, so set this bit */
299 cmdr |= SDMMC_CMD_RESP_EXP;
300 if (cmd->flags & MMC_RSP_136)
301 cmdr |= SDMMC_CMD_RESP_LONG;
304 if (cmd->flags & MMC_RSP_CRC)
305 cmdr |= SDMMC_CMD_RESP_CRC;
309 cmdr |= SDMMC_CMD_DAT_EXP;
310 if (data->flags & MMC_DATA_STREAM)
311 cmdr |= SDMMC_CMD_STRM_MODE;
312 if (data->flags & MMC_DATA_WRITE)
313 cmdr |= SDMMC_CMD_DAT_WR;
316 if (drv_data && drv_data->prepare_command)
317 drv_data->prepare_command(slot->host, &cmdr);
323 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
325 struct mmc_command *stop;
331 stop = &host->stop_abort;
333 memset(stop, 0, sizeof(struct mmc_command));
335 if (cmdr == MMC_READ_SINGLE_BLOCK ||
336 cmdr == MMC_READ_MULTIPLE_BLOCK ||
337 cmdr == MMC_WRITE_BLOCK ||
338 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
339 stop->opcode = MMC_STOP_TRANSMISSION;
341 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
342 } else if (cmdr == SD_IO_RW_EXTENDED) {
343 stop->opcode = SD_IO_RW_DIRECT;
344 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
345 ((cmd->arg >> 28) & 0x7);
346 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
351 cmdr = stop->opcode | SDMMC_CMD_STOP |
352 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
357 static void dw_mci_start_command(struct dw_mci *host,
358 struct mmc_command *cmd, u32 cmd_flags)
360 struct dw_mci_slot *slot = host->slot[0];
361 /*temporality fix slot[0] due to host->num_slots equal to 1*/
363 host->pre_cmd = host->cmd;
366 "start command: ARGR=0x%08x CMDR=0x%08x\n",
367 cmd->arg, cmd_flags);
369 if(SD_SWITCH_VOLTAGE == cmd->opcode){
370 /*confirm non-low-power mode*/
371 mci_writel(host, CMDARG, 0);
372 dw_mci_disable_low_power(slot);
374 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
375 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
377 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
380 mci_writel(host, CMDARG, cmd->arg);
383 /* fix the value to 1 in some Soc,for example RK3188. */
384 if(host->mmc->hold_reg_flag)
385 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
387 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
391 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
393 dw_mci_start_command(host, data->stop, host->stop_cmdr);
396 /* DMA interface functions */
397 static void dw_mci_stop_dma(struct dw_mci *host)
399 if (host->using_dma) {
400 /* Fixme: No need to terminate edma, may cause flush op */
401 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
402 host->dma_ops->stop(host);
403 host->dma_ops->cleanup(host);
406 /* Data transfer was stopped by the interrupt handler */
407 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
410 static int dw_mci_get_dma_dir(struct mmc_data *data)
412 if (data->flags & MMC_DATA_WRITE)
413 return DMA_TO_DEVICE;
415 return DMA_FROM_DEVICE;
418 #ifdef CONFIG_MMC_DW_IDMAC
419 static void dw_mci_dma_cleanup(struct dw_mci *host)
421 struct mmc_data *data = host->data;
424 if (!data->host_cookie)
425 dma_unmap_sg(host->dev,
428 dw_mci_get_dma_dir(data));
431 static void dw_mci_idmac_reset(struct dw_mci *host)
433 u32 bmod = mci_readl(host, BMOD);
434 /* Software reset of DMA */
435 bmod |= SDMMC_IDMAC_SWRESET;
436 mci_writel(host, BMOD, bmod);
439 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
443 /* Disable and reset the IDMAC interface */
444 temp = mci_readl(host, CTRL);
445 temp &= ~SDMMC_CTRL_USE_IDMAC;
446 temp |= SDMMC_CTRL_DMA_RESET;
447 mci_writel(host, CTRL, temp);
449 /* Stop the IDMAC running */
450 temp = mci_readl(host, BMOD);
451 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
452 temp |= SDMMC_IDMAC_SWRESET;
453 mci_writel(host, BMOD, temp);
456 static void dw_mci_idmac_complete_dma(void *arg)
458 struct dw_mci *host = arg;
459 struct mmc_data *data = host->data;
461 dev_vdbg(host->dev, "DMA complete\n");
464 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
465 host->mrq->cmd->opcode,host->mrq->cmd->arg,
466 data->blocks,data->blksz,mmc_hostname(host->mmc));
469 host->dma_ops->cleanup(host);
472 * If the card was removed, data will be NULL. No point in trying to
473 * send the stop command or waiting for NBUSY in this case.
476 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
477 tasklet_schedule(&host->tasklet);
481 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
485 struct idmac_desc *desc = host->sg_cpu;
487 for (i = 0; i < sg_len; i++, desc++) {
488 unsigned int length = sg_dma_len(&data->sg[i]);
489 u32 mem_addr = sg_dma_address(&data->sg[i]);
491 /* Set the OWN bit and disable interrupts for this descriptor */
492 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
495 IDMAC_SET_BUFFER1_SIZE(desc, length);
497 /* Physical address to DMA to/from */
498 desc->des2 = mem_addr;
501 /* Set first descriptor */
503 desc->des0 |= IDMAC_DES0_FD;
505 /* Set last descriptor */
506 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
507 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
508 desc->des0 |= IDMAC_DES0_LD;
513 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
517 dw_mci_translate_sglist(host, host->data, sg_len);
519 /* Select IDMAC interface */
520 temp = mci_readl(host, CTRL);
521 temp |= SDMMC_CTRL_USE_IDMAC;
522 mci_writel(host, CTRL, temp);
526 /* Enable the IDMAC */
527 temp = mci_readl(host, BMOD);
528 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
529 mci_writel(host, BMOD, temp);
531 /* Start it running */
532 mci_writel(host, PLDMND, 1);
535 static int dw_mci_idmac_init(struct dw_mci *host)
537 struct idmac_desc *p;
540 /* Number of descriptors in the ring buffer */
541 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
543 /* Forward link the descriptor list */
544 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
545 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
547 /* Set the last descriptor as the end-of-ring descriptor */
548 p->des3 = host->sg_dma;
549 p->des0 = IDMAC_DES0_ER;
551 dw_mci_idmac_reset(host);
553 /* Mask out interrupts - get Tx & Rx complete only */
554 mci_writel(host, IDSTS, IDMAC_INT_CLR);
555 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
558 /* Set the descriptor base address */
559 mci_writel(host, DBADDR, host->sg_dma);
563 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
564 .init = dw_mci_idmac_init,
565 .start = dw_mci_idmac_start_dma,
566 .stop = dw_mci_idmac_stop_dma,
567 .complete = dw_mci_idmac_complete_dma,
568 .cleanup = dw_mci_dma_cleanup,
572 static void dw_mci_edma_cleanup(struct dw_mci *host)
574 struct mmc_data *data = host->data;
577 if (!data->host_cookie)
578 dma_unmap_sg(host->dev,
579 data->sg, data->sg_len,
580 dw_mci_get_dma_dir(data));
583 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
585 dmaengine_terminate_all(host->dms->ch);
588 static void dw_mci_edmac_complete_dma(void *arg)
590 struct dw_mci *host = arg;
591 struct mmc_data *data = host->data;
593 dev_vdbg(host->dev, "DMA complete\n");
596 if(data->flags & MMC_DATA_READ)
597 /* Invalidate cache after read */
598 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
599 data->sg_len, DMA_FROM_DEVICE);
601 host->dma_ops->cleanup(host);
604 * If the card was removed, data will be NULL. No point in trying to
605 * send the stop command or waiting for NBUSY in this case.
608 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
609 tasklet_schedule(&host->tasklet);
613 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
615 struct dma_slave_config slave_config;
616 struct dma_async_tx_descriptor *desc = NULL;
617 struct scatterlist *sgl = host->data->sg;
618 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
619 u32 sg_elems = host->data->sg_len;
620 u32 fifoth_val, mburst;
624 /* Set external dma config: burst size, burst width*/
625 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
626 slave_config.src_addr = slave_config.dst_addr;
627 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
628 slave_config.src_addr_width = slave_config.dst_addr_width;
630 /* Match FIFO dma burst MSIZE with external dma config*/
631 fifoth_val = mci_readl(host, FIFOTH);
632 mburst = mszs[(fifoth_val >> 28) & 0x7];
634 /* edmac limit burst to 16, but work around for rk3036 to 8 */
635 if (unlikely(cpu_is_rk3036()))
640 slave_config.dst_maxburst = (mburst > burst_limit) ? burst_limit : mburst;
641 slave_config.src_maxburst = slave_config.dst_maxburst;
643 if(host->data->flags & MMC_DATA_WRITE){
644 slave_config.direction = DMA_MEM_TO_DEV;
645 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
647 dev_err(host->dev, "error in dw_mci edma configuration.\n");
651 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
652 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
654 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
657 /* Set dw_mci_edmac_complete_dma as callback */
658 desc->callback = dw_mci_edmac_complete_dma;
659 desc->callback_param = (void *)host;
660 dmaengine_submit(desc);
662 /* Flush cache before write */
663 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
664 sg_elems, DMA_TO_DEVICE);
665 dma_async_issue_pending(host->dms->ch);
668 slave_config.direction = DMA_DEV_TO_MEM;
669 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
671 dev_err(host->dev, "error in dw_mci edma configuration.\n");
674 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
675 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
677 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
680 /* set dw_mci_edmac_complete_dma as callback */
681 desc->callback = dw_mci_edmac_complete_dma;
682 desc->callback_param = (void *)host;
683 dmaengine_submit(desc);
684 dma_async_issue_pending(host->dms->ch);
688 static int dw_mci_edmac_init(struct dw_mci *host)
690 /* Request external dma channel, SHOULD decide chn in dts */
692 host->dms = (struct dw_mci_dma_slave *)kmalloc
693 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
694 if (NULL == host->dms) {
695 dev_err(host->dev, "No enough memory to alloc dms.\n");
699 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
700 if (!host->dms->ch) {
701 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
702 host->dms->ch->chan_id);
709 if (NULL != host->dms) {
717 static void dw_mci_edmac_exit(struct dw_mci *host)
719 if (NULL != host->dms) {
720 if (NULL != host->dms->ch) {
721 dma_release_channel(host->dms->ch);
722 host->dms->ch = NULL;
729 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
730 .init = dw_mci_edmac_init,
731 .exit = dw_mci_edmac_exit,
732 .start = dw_mci_edmac_start_dma,
733 .stop = dw_mci_edmac_stop_dma,
734 .complete = dw_mci_edmac_complete_dma,
735 .cleanup = dw_mci_edma_cleanup,
737 #endif /* CONFIG_MMC_DW_IDMAC */
739 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
740 struct mmc_data *data,
743 struct scatterlist *sg;
744 unsigned int i, sg_len;
746 if (!next && data->host_cookie)
747 return data->host_cookie;
750 * We don't do DMA on "complex" transfers, i.e. with
751 * non-word-aligned buffers or lengths. Also, we don't bother
752 * with all the DMA setup overhead for short transfers.
754 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
760 for_each_sg(data->sg, sg, data->sg_len, i) {
761 if (sg->offset & 3 || sg->length & 3)
765 sg_len = dma_map_sg(host->dev,
768 dw_mci_get_dma_dir(data));
773 data->host_cookie = sg_len;
778 static void dw_mci_pre_req(struct mmc_host *mmc,
779 struct mmc_request *mrq,
782 struct dw_mci_slot *slot = mmc_priv(mmc);
783 struct mmc_data *data = mrq->data;
785 if (!slot->host->use_dma || !data)
788 if (data->host_cookie) {
789 data->host_cookie = 0;
793 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
794 data->host_cookie = 0;
797 static void dw_mci_post_req(struct mmc_host *mmc,
798 struct mmc_request *mrq,
801 struct dw_mci_slot *slot = mmc_priv(mmc);
802 struct mmc_data *data = mrq->data;
804 if (!slot->host->use_dma || !data)
807 if (data->host_cookie)
808 dma_unmap_sg(slot->host->dev,
811 dw_mci_get_dma_dir(data));
812 data->host_cookie = 0;
815 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
817 #ifdef CONFIG_MMC_DW_IDMAC
818 unsigned int blksz = data->blksz;
819 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
820 u32 fifo_width = 1 << host->data_shift;
821 u32 blksz_depth = blksz / fifo_width, fifoth_val;
822 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
823 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
825 tx_wmark = (host->fifo_depth) / 2;
826 tx_wmark_invers = host->fifo_depth - tx_wmark;
830 * if blksz is not a multiple of the FIFO width
832 if (blksz % fifo_width) {
839 if (!((blksz_depth % mszs[idx]) ||
840 (tx_wmark_invers % mszs[idx]))) {
842 rx_wmark = mszs[idx] - 1;
847 * If idx is '0', it won't be tried
848 * Thus, initial values are uesed
851 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
852 mci_writel(host, FIFOTH, fifoth_val);
857 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
859 unsigned int blksz = data->blksz;
860 u32 blksz_depth, fifo_depth;
863 WARN_ON(!(data->flags & MMC_DATA_READ));
865 if (host->timing != MMC_TIMING_MMC_HS200 &&
866 host->timing != MMC_TIMING_UHS_SDR104)
869 blksz_depth = blksz / (1 << host->data_shift);
870 fifo_depth = host->fifo_depth;
872 if (blksz_depth > fifo_depth)
876 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
877 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
878 * Currently just choose blksz.
881 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
885 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
888 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
896 /* If we don't have a channel, we can't do DMA */
900 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
902 /* Fixme: No need terminate edma, may cause flush op */
903 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
904 host->dma_ops->stop(host);
911 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
912 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
916 * Decide the MSIZE and RX/TX Watermark.
917 * If current block size is same with previous size,
918 * no need to update fifoth.
920 if (host->prev_blksz != data->blksz)
921 dw_mci_adjust_fifoth(host, data);
924 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
926 /* Enable the DMA interface */
927 temp = mci_readl(host, CTRL);
928 temp |= SDMMC_CTRL_DMA_ENABLE;
929 mci_writel(host, CTRL, temp);
931 /* Disable RX/TX IRQs, let DMA handle it */
932 spin_lock_irqsave(&host->slock, flags);
933 temp = mci_readl(host, INTMASK);
934 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
935 mci_writel(host, INTMASK, temp);
936 spin_unlock_irqrestore(&host->slock, flags);
938 host->dma_ops->start(host, sg_len);
943 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
948 data->error = -EINPROGRESS;
955 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
957 if (data->flags & MMC_DATA_READ) {
958 host->dir_status = DW_MCI_RECV_STATUS;
959 dw_mci_ctrl_rd_thld(host, data);
961 host->dir_status = DW_MCI_SEND_STATUS;
964 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
965 data->blocks, data->blksz, mmc_hostname(host->mmc));
967 if (dw_mci_submit_data_dma(host, data)) {
968 int flags = SG_MITER_ATOMIC;
969 if (host->data->flags & MMC_DATA_READ)
970 flags |= SG_MITER_TO_SG;
972 flags |= SG_MITER_FROM_SG;
974 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
976 host->part_buf_start = 0;
977 host->part_buf_count = 0;
979 spin_lock_irqsave(&host->slock, flag);
980 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
981 temp = mci_readl(host, INTMASK);
982 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
983 mci_writel(host, INTMASK, temp);
984 spin_unlock_irqrestore(&host->slock, flag);
986 temp = mci_readl(host, CTRL);
987 temp &= ~SDMMC_CTRL_DMA_ENABLE;
988 mci_writel(host, CTRL, temp);
991 * Use the initial fifoth_val for PIO mode.
992 * If next issued data may be transfered by DMA mode,
993 * prev_blksz should be invalidated.
995 mci_writel(host, FIFOTH, host->fifoth_val);
996 host->prev_blksz = 0;
999 * Keep the current block size.
1000 * It will be used to decide whether to update
1001 * fifoth register next time.
1003 host->prev_blksz = data->blksz;
1007 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1009 struct dw_mci *host = slot->host;
1010 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1011 unsigned int cmd_status = 0;
1012 #ifdef SDMMC_WAIT_FOR_UNBUSY
1014 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1016 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1018 ret = time_before(jiffies, timeout);
1019 cmd_status = mci_readl(host, STATUS);
1020 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1024 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1025 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1028 mci_writel(host, CMDARG, arg);
1030 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
1031 if(cmd & SDMMC_CMD_UPD_CLK)
1032 timeout = jiffies + msecs_to_jiffies(50);
1034 timeout = jiffies + msecs_to_jiffies(500);
1035 while (time_before(jiffies, timeout)) {
1036 cmd_status = mci_readl(host, CMD);
1037 if (!(cmd_status & SDMMC_CMD_START))
1040 dev_err(&slot->mmc->class_dev,
1041 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1042 cmd, arg, cmd_status);
1045 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1047 struct dw_mci *host = slot->host;
1048 unsigned int tempck,clock = slot->clock;
1053 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1054 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1057 mci_writel(host, CLKENA, 0);
1058 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1059 if(host->svi_flags == 0)
1060 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1062 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1064 } else if (clock != host->current_speed || force_clkinit) {
1065 div = host->bus_hz / clock;
1066 if (host->bus_hz % clock && host->bus_hz > clock)
1068 * move the + 1 after the divide to prevent
1069 * over-clocking the card.
1073 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1075 if ((clock << div) != slot->__clk_old || force_clkinit) {
1076 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1077 dev_info(&slot->mmc->class_dev,
1078 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1079 slot->id, host->bus_hz, clock,
1082 host->set_speed = tempck;
1083 host->set_div = div;
1087 mci_writel(host, CLKENA, 0);
1088 mci_writel(host, CLKSRC, 0);
1092 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1094 if(clock <= 400*1000){
1095 MMC_DBG_BOOT_FUNC(host->mmc,
1096 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1097 clock * 2, mmc_hostname(host->mmc));
1098 /* clk_mmc will change parents to 24MHz xtal*/
1099 clk_set_rate(host->clk_mmc, clock * 2);
1102 host->set_div = div;
1106 MMC_DBG_BOOT_FUNC(host->mmc,
1107 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1108 mmc_hostname(host->mmc));
1111 MMC_DBG_ERR_FUNC(host->mmc,
1112 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1113 mmc_hostname(host->mmc));
1115 host->set_div = div;
1116 host->bus_hz = host->set_speed * 2;
1117 MMC_DBG_BOOT_FUNC(host->mmc,
1118 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1119 div, host->bus_hz, mmc_hostname(host->mmc));
1121 /* BUG may be here, come on, Linux BSP engineer looks!
1122 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1123 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1124 some oops happened like that:
1125 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1126 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1127 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1128 mmc0: new high speed DDR MMC card at address 0001
1129 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1131 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1132 mmcblk0: retrying using single block read
1133 mmcblk0: error -110 sending status command, retrying
1135 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1138 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1139 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1142 host->set_div = div;
1143 host->bus_hz = host->set_speed * 2;
1144 MMC_DBG_BOOT_FUNC(host->mmc,
1145 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1146 div, host->bus_hz, mmc_hostname(host->mmc));
1149 if (host->verid < DW_MMC_240A)
1150 clk_set_rate(host->clk_mmc,(host->bus_hz));
1152 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1158 /* set clock to desired speed */
1159 mci_writel(host, CLKDIV, div);
1163 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1165 /* enable clock; only low power if no SDIO */
1166 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1168 if (host->verid < DW_MMC_240A)
1169 sdio_int = SDMMC_INT_SDIO(slot->id);
1171 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1173 if (!(mci_readl(host, INTMASK) & sdio_int))
1174 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1175 mci_writel(host, CLKENA, clk_en_a);
1179 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1180 /* keep the clock with reflecting clock dividor */
1181 slot->__clk_old = clock << div;
1184 host->current_speed = clock;
1186 if(slot->ctype != slot->pre_ctype)
1187 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1189 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1190 mmc_hostname(host->mmc));
1191 slot->pre_ctype = slot->ctype;
1193 /* Set the current slot bus width */
1194 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1197 extern struct mmc_card *this_card;
1198 static void dw_mci_wait_unbusy(struct dw_mci *host)
1201 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1202 unsigned long time_loop;
1203 unsigned int status;
1206 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1208 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1209 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1210 /* Special care for (secure)erase timeout calculation */
1212 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1215 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1216 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1217 300000 * (this_card->ext_csd.sec_erase_mult)) :
1218 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1222 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1223 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1224 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1225 timeout = SDMMC_DATA_TIMEOUT_SD;
1228 time_loop = jiffies + msecs_to_jiffies(timeout);
1230 status = mci_readl(host, STATUS);
1231 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1233 } while (time_before(jiffies, time_loop));
1238 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1241 * 0--status is busy.
1242 * 1--status is unbusy.
1244 int dw_mci_card_busy(struct mmc_host *mmc)
1246 struct dw_mci_slot *slot = mmc_priv(mmc);
1247 struct dw_mci *host = slot->host;
1249 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1250 host->svi_flags, mmc_hostname(host->mmc));
1253 if(host->svi_flags == 0){
1255 host->svi_flags = 1;
1256 return host->svi_flags;
1259 host->svi_flags = 0;
1260 return host->svi_flags;
1266 static void __dw_mci_start_request(struct dw_mci *host,
1267 struct dw_mci_slot *slot,
1268 struct mmc_command *cmd)
1270 struct mmc_request *mrq;
1271 struct mmc_data *data;
1275 if (host->pdata->select_slot)
1276 host->pdata->select_slot(slot->id);
1278 host->cur_slot = slot;
1281 dw_mci_wait_unbusy(host);
1283 host->pending_events = 0;
1284 host->completed_events = 0;
1285 host->data_status = 0;
1289 dw_mci_set_timeout(host);
1290 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1291 mci_writel(host, BLKSIZ, data->blksz);
1294 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1296 /* this is the first command, send the initialization clock */
1297 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1298 cmdflags |= SDMMC_CMD_INIT;
1301 dw_mci_submit_data(host, data);
1305 dw_mci_start_command(host, cmd, cmdflags);
1308 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
1311 static void dw_mci_start_request(struct dw_mci *host,
1312 struct dw_mci_slot *slot)
1314 struct mmc_request *mrq = slot->mrq;
1315 struct mmc_command *cmd;
1317 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1318 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1320 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1321 __dw_mci_start_request(host, slot, cmd);
1324 /* must be called with host->lock held */
1325 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1326 struct mmc_request *mrq)
1328 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1333 if (host->state == STATE_IDLE) {
1334 host->state = STATE_SENDING_CMD;
1335 dw_mci_start_request(host, slot);
1337 list_add_tail(&slot->queue_node, &host->queue);
1341 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1343 struct dw_mci_slot *slot = mmc_priv(mmc);
1344 struct dw_mci *host = slot->host;
1349 * The check for card presence and queueing of the request must be
1350 * atomic, otherwise the card could be removed in between and the
1351 * request wouldn't fail until another card was inserted.
1353 spin_lock_bh(&host->lock);
1355 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1356 spin_unlock_bh(&host->lock);
1357 mrq->cmd->error = -ENOMEDIUM;
1358 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1359 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
1361 mmc_request_done(mmc, mrq);
1365 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1366 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1368 dw_mci_queue_request(host, slot, mrq);
1370 spin_unlock_bh(&host->lock);
1373 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1375 struct dw_mci_slot *slot = mmc_priv(mmc);
1376 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1377 struct dw_mci *host = slot->host;
1379 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1382 #ifdef SDMMC_WAIT_FOR_UNBUSY
1383 unsigned long time_loop;
1386 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1387 if(host->svi_flags == 1)
1388 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1390 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1392 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1395 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1396 printk("%d..%s: no card. [%s]\n", \
1397 __LINE__, __FUNCTION__, mmc_hostname(mmc));
1402 ret = time_before(jiffies, time_loop);
1403 regs = mci_readl(slot->host, STATUS);
1404 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1410 printk("slot->flags = %lu ", slot->flags);
1411 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1412 if(host->svi_flags != 1)
1415 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1416 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
1420 switch (ios->bus_width) {
1421 case MMC_BUS_WIDTH_4:
1422 slot->ctype = SDMMC_CTYPE_4BIT;
1424 case MMC_BUS_WIDTH_8:
1425 slot->ctype = SDMMC_CTYPE_8BIT;
1428 /* set default 1 bit mode */
1429 slot->ctype = SDMMC_CTYPE_1BIT;
1430 slot->pre_ctype = SDMMC_CTYPE_1BIT;
1433 regs = mci_readl(slot->host, UHS_REG);
1436 if (ios->timing == MMC_TIMING_UHS_DDR50)
1437 regs |= ((0x1 << slot->id) << 16);
1439 regs &= ~((0x1 << slot->id) << 16);
1441 mci_writel(slot->host, UHS_REG, regs);
1442 slot->host->timing = ios->timing;
1445 * Use mirror of ios->clock to prevent race with mmc
1446 * core ios update when finding the minimum.
1448 slot->clock = ios->clock;
1450 if (drv_data && drv_data->set_ios)
1451 drv_data->set_ios(slot->host, ios);
1453 /* Slot specific timing and width adjustment */
1454 dw_mci_setup_bus(slot, false);
1458 switch (ios->power_mode) {
1460 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1462 if (slot->host->pdata->setpower)
1463 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1464 regs = mci_readl(slot->host, PWREN);
1465 regs |= (1 << slot->id);
1466 mci_writel(slot->host, PWREN, regs);
1469 /* Power down slot */
1470 if(slot->host->pdata->setpower)
1471 slot->host->pdata->setpower(slot->id, 0);
1472 regs = mci_readl(slot->host, PWREN);
1473 regs &= ~(1 << slot->id);
1474 mci_writel(slot->host, PWREN, regs);
1481 static int dw_mci_get_ro(struct mmc_host *mmc)
1484 struct dw_mci_slot *slot = mmc_priv(mmc);
1485 struct dw_mci_board *brd = slot->host->pdata;
1487 /* Use platform get_ro function, else try on board write protect */
1488 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1490 else if(brd->get_ro)
1491 read_only = brd->get_ro(slot->id);
1492 else if(gpio_is_valid(slot->wp_gpio))
1493 read_only = gpio_get_value(slot->wp_gpio);
1496 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1498 dev_dbg(&mmc->class_dev, "card is %s\n",
1499 read_only ? "read-only" : "read-write");
1504 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1506 struct dw_mci_slot *slot = mmc_priv(mmc);
1507 struct dw_mci *host = slot->host;
1508 /*struct dw_mci_board *brd = slot->host->pdata;*/
1510 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1513 spin_lock_bh(&host->lock);
1516 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1518 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1520 spin_unlock_bh(&host->lock);
1522 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1523 if(__clk_is_enabled(host->hclk_mmc) == false)
1524 clk_prepare_enable(host->hclk_mmc);
1525 if(__clk_is_enabled(host->clk_mmc) == false)
1526 clk_prepare_enable(host->clk_mmc);
1528 if(__clk_is_enabled(host->clk_mmc) == true)
1529 clk_disable_unprepare(slot->host->clk_mmc);
1530 if(__clk_is_enabled(host->hclk_mmc) == true)
1531 clk_disable_unprepare(slot->host->hclk_mmc);
1534 mmc_detect_change(slot->mmc, 20);
1540 static int dw_mci_get_cd(struct mmc_host *mmc)
1543 struct dw_mci_slot *slot = mmc_priv(mmc);
1544 struct dw_mci_board *brd = slot->host->pdata;
1545 struct dw_mci *host = slot->host;
1546 int gpio_cd = mmc_gpio_get_cd(mmc);
1549 if ((soc_is_rk3126() || soc_is_rk3126b()) &&
1550 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1551 gpio_cd = slot->cd_gpio;
1552 if (gpio_is_valid(gpio_cd)) {
1553 gpio_val = gpio_get_value(gpio_cd);
1555 if (gpio_val == gpio_get_value(gpio_cd)) {
1556 gpio_cd = gpio_get_value(gpio_cd) == 0 ? 1 : 0;
1558 /* Enable force_jtag wihtout card in slot, ONLY for NCD-package */
1559 grf_writel((0x1 << 24) | (1 << 8), RK312X_GRF_SOC_CON0);
1560 dw_mci_ctrl_all_reset(host);
1562 /* Really card detected: SHOULD disable force_jtag */
1563 grf_writel((0x1 << 24) | (0 << 8), RK312X_GRF_SOC_CON0);
1567 return slot->last_detect_state;
1570 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
1574 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1575 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1577 /* Use platform get_cd function, else try onboard card detect */
1578 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1580 else if (brd->get_cd)
1581 present = !brd->get_cd(slot->id);
1582 else if (!IS_ERR_VALUE(gpio_cd))
1585 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1588 spin_lock_bh(&host->lock);
1590 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1591 dev_dbg(&mmc->class_dev, "card is present\n");
1593 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1594 dev_dbg(&mmc->class_dev, "card is not present\n");
1596 spin_unlock_bh(&host->lock);
1603 * Dts Should caps emmc controller with poll-hw-reset
1605 static void dw_mci_hw_reset(struct mmc_host *mmc)
1607 struct dw_mci_slot *slot = mmc_priv(mmc);
1608 struct dw_mci *host = slot->host;
1613 unsigned long timeout;
1616 /* (1) CMD12 to end any transfer in process */
1617 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1618 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1620 if(host->mmc->hold_reg_flag)
1621 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1622 mci_writel(host, CMDARG, 0);
1624 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
1626 timeout = jiffies + msecs_to_jiffies(500);
1628 ret = time_before(jiffies, timeout);
1629 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1634 MMC_DBG_ERR_FUNC(host->mmc,
1635 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1636 __func__, mmc_hostname(host->mmc));
1638 /* (2) wait DTO, even if no response is sent back by card */
1640 timeout = jiffies + msecs_to_jiffies(5);
1642 ret = time_before(jiffies, timeout);
1643 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1644 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1650 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1652 /* Software reset - BMOD[0] for IDMA only */
1653 regs = mci_readl(host, BMOD);
1654 regs |= SDMMC_IDMAC_SWRESET;
1655 mci_writel(host, BMOD, regs);
1656 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1657 regs = mci_readl(host, BMOD);
1658 if(regs & SDMMC_IDMAC_SWRESET)
1659 MMC_DBG_WARN_FUNC(host->mmc,
1660 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1661 __func__, mmc_hostname(host->mmc));
1663 /* DMA reset - CTRL[2] */
1664 regs = mci_readl(host, CTRL);
1665 regs |= SDMMC_CTRL_DMA_RESET;
1666 mci_writel(host, CTRL, regs);
1667 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1668 regs = mci_readl(host, CTRL);
1669 if(regs & SDMMC_CTRL_DMA_RESET)
1670 MMC_DBG_WARN_FUNC(host->mmc,
1671 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1672 __func__, mmc_hostname(host->mmc));
1674 /* FIFO reset - CTRL[1] */
1675 regs = mci_readl(host, CTRL);
1676 regs |= SDMMC_CTRL_FIFO_RESET;
1677 mci_writel(host, CTRL, regs);
1678 mdelay(1); /* no timing limited, 1ms is random value */
1679 regs = mci_readl(host, CTRL);
1680 if(regs & SDMMC_CTRL_FIFO_RESET)
1681 MMC_DBG_WARN_FUNC(host->mmc,
1682 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1683 __func__, mmc_hostname(host->mmc));
1686 According to eMMC spec
1687 tRstW >= 1us ; RST_n pulse width
1688 tRSCA >= 200us ; RST_n to Command time
1689 tRSTH >= 1us ; RST_n high period
1691 mci_writel(slot->host, PWREN, 0x0);
1692 mci_writel(slot->host, RST_N, 0x0);
1694 udelay(10); /* 10us for bad quality eMMc. */
1696 mci_writel(slot->host, PWREN, 0x1);
1697 mci_writel(slot->host, RST_N, 0x1);
1699 usleep_range(500, 1000); /* at least 500(> 200us) */
1703 * Disable lower power mode.
1705 * Low power mode will stop the card clock when idle. According to the
1706 * description of the CLKENA register we should disable low power mode
1707 * for SDIO cards if we need SDIO interrupts to work.
1709 * This function is fast if low power mode is already disabled.
1711 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1713 struct dw_mci *host = slot->host;
1715 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1717 clk_en_a = mci_readl(host, CLKENA);
1719 if (clk_en_a & clken_low_pwr) {
1720 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1721 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1722 SDMMC_CMD_PRV_DAT_WAIT, 0);
1726 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1728 struct dw_mci_slot *slot = mmc_priv(mmc);
1729 struct dw_mci *host = slot->host;
1730 unsigned long flags;
1734 spin_lock_irqsave(&host->slock, flags);
1736 /* Enable/disable Slot Specific SDIO interrupt */
1737 int_mask = mci_readl(host, INTMASK);
1739 if (host->verid < DW_MMC_240A)
1740 sdio_int = SDMMC_INT_SDIO(slot->id);
1742 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1746 * Turn off low power mode if it was enabled. This is a bit of
1747 * a heavy operation and we disable / enable IRQs a lot, so
1748 * we'll leave low power mode disabled and it will get
1749 * re-enabled again in dw_mci_setup_bus().
1751 dw_mci_disable_low_power(slot);
1753 mci_writel(host, INTMASK,
1754 (int_mask | sdio_int));
1756 mci_writel(host, INTMASK,
1757 (int_mask & ~sdio_int));
1760 spin_unlock_irqrestore(&host->slock, flags);
1763 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1765 IO_DOMAIN_12 = 1200,
1766 IO_DOMAIN_18 = 1800,
1767 IO_DOMAIN_33 = 3300,
1769 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1779 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1780 __FUNCTION__, mmc_hostname(host->mmc));
1783 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1784 __FUNCTION__, mmc_hostname(host->mmc));
1788 if(cpu_is_rk3288()){
1789 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1790 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1794 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1795 __FUNCTION__, mmc_hostname(host->mmc));
1799 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1800 struct mmc_ios *ios)
1803 unsigned int value,uhs_reg;
1806 * Signal Voltage Switching is only applicable for Host Controllers
1809 if (host->verid < DW_MMC_240A)
1812 uhs_reg = mci_readl(host, UHS_REG);
1813 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1814 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1816 switch (ios->signal_voltage) {
1817 case MMC_SIGNAL_VOLTAGE_330:
1818 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1820 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1821 /* regulator_put(host->vmmc); //to be done in remove function. */
1823 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1824 __func__, regulator_get_voltage(host->vmmc), ret);
1826 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1827 " failed\n", mmc_hostname(host->mmc));
1830 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1832 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1833 __FUNCTION__, mmc_hostname(host->mmc));
1835 /* set High-power mode */
1836 value = mci_readl(host, CLKENA);
1837 value &= ~SDMMC_CLKEN_LOW_PWR;
1838 mci_writel(host,CLKENA , value);
1840 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1841 mci_writel(host,UHS_REG , uhs_reg);
1844 usleep_range(5000, 5500);
1846 /* 3.3V regulator output should be stable within 5 ms */
1847 uhs_reg = mci_readl(host, UHS_REG);
1848 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1851 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1852 mmc_hostname(host->mmc));
1855 case MMC_SIGNAL_VOLTAGE_180:
1857 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1858 /* regulator_put(host->vmmc);//to be done in remove function. */
1860 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1861 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1863 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1864 " failed\n", mmc_hostname(host->mmc));
1867 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1871 * Enable 1.8V Signal Enable in the Host Control2
1874 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1877 usleep_range(5000, 5500);
1878 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1879 __FUNCTION__,mmc_hostname(host->mmc));
1881 /* 1.8V regulator output should be stable within 5 ms */
1882 uhs_reg = mci_readl(host, UHS_REG);
1883 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1886 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1887 mmc_hostname(host->mmc));
1890 case MMC_SIGNAL_VOLTAGE_120:
1892 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1894 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1895 " failed\n", mmc_hostname(host->mmc));
1901 /* No signal voltage switch required */
1907 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1908 struct mmc_ios *ios)
1910 struct dw_mci_slot *slot = mmc_priv(mmc);
1911 struct dw_mci *host = slot->host;
1914 if (host->verid < DW_MMC_240A)
1917 err = dw_mci_do_start_signal_voltage_switch(host, ios);
1923 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1925 struct dw_mci_slot *slot = mmc_priv(mmc);
1926 struct dw_mci *host = slot->host;
1927 const struct dw_mci_drv_data *drv_data = host->drv_data;
1928 struct dw_mci_tuning_data tuning_data;
1931 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1932 if(cpu_is_rk3036() || cpu_is_rk312x())
1935 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1936 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1937 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1938 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1939 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1940 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1941 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1945 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1946 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1947 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1950 "Undefined command(%d) for tuning\n", opcode);
1955 /* Recommend sample phase and delayline
1956 Fixme: Mix-use these three controllers will cause
1959 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1960 tuning_data.con_id = 3;
1961 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1962 tuning_data.con_id = 1;
1964 tuning_data.con_id = 0;
1966 /* 0: driver, from host->devices
1967 1: sample, from devices->host
1969 tuning_data.tuning_type = 1;
1971 if (drv_data && drv_data->execute_tuning)
1972 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1977 static const struct mmc_host_ops dw_mci_ops = {
1978 .request = dw_mci_request,
1979 .pre_req = dw_mci_pre_req,
1980 .post_req = dw_mci_post_req,
1981 .set_ios = dw_mci_set_ios,
1982 .get_ro = dw_mci_get_ro,
1983 .get_cd = dw_mci_get_cd,
1984 .set_sdio_status = dw_mci_set_sdio_status,
1985 .hw_reset = dw_mci_hw_reset,
1986 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1987 .execute_tuning = dw_mci_execute_tuning,
1988 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1989 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
1990 .card_busy = dw_mci_card_busy,
1995 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
1997 unsigned long flags;
2002 local_irq_save(flags);
2003 if(host->irq_state != irqflag)
2005 host->irq_state = irqflag;
2008 enable_irq(host->irq);
2012 disable_irq(host->irq);
2015 local_irq_restore(flags);
2019 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2020 __releases(&host->lock)
2021 __acquires(&host->lock)
2023 if(DW_MCI_SEND_STATUS == host->dir_status){
2025 if( MMC_BUS_TEST_W != host->cmd->opcode){
2026 if(host->data_status & SDMMC_INT_DCRC)
2027 host->data->error = -EILSEQ;
2028 else if(host->data_status & SDMMC_INT_EBE)
2029 host->data->error = -ETIMEDOUT;
2031 dw_mci_wait_unbusy(host);
2034 dw_mci_wait_unbusy(host);
2039 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2040 __releases(&host->lock)
2041 __acquires(&host->lock)
2043 struct dw_mci_slot *slot;
2044 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2046 WARN_ON(host->cmd || host->data);
2048 del_timer_sync(&host->dto_timer);
2049 dw_mci_deal_data_end(host, mrq);
2052 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2053 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2055 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2056 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2058 host->cur_slot->mrq = NULL;
2060 if (!list_empty(&host->queue)) {
2061 slot = list_entry(host->queue.next,
2062 struct dw_mci_slot, queue_node);
2063 list_del(&slot->queue_node);
2064 dev_vdbg(host->dev, "list not empty: %s is next\n",
2065 mmc_hostname(slot->mmc));
2066 host->state = STATE_SENDING_CMD;
2067 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2068 dw_mci_start_request(host, slot);
2070 dev_vdbg(host->dev, "list empty\n");
2071 host->state = STATE_IDLE;
2074 spin_unlock(&host->lock);
2075 mmc_request_done(prev_mmc, mrq);
2076 spin_lock(&host->lock);
2079 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2081 u32 status = host->cmd_status;
2083 host->cmd_status = 0;
2085 /* Read the response from the card (up to 16 bytes) */
2086 if (cmd->flags & MMC_RSP_PRESENT) {
2087 if (cmd->flags & MMC_RSP_136) {
2088 cmd->resp[3] = mci_readl(host, RESP0);
2089 cmd->resp[2] = mci_readl(host, RESP1);
2090 cmd->resp[1] = mci_readl(host, RESP2);
2091 cmd->resp[0] = mci_readl(host, RESP3);
2093 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2094 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2096 cmd->resp[0] = mci_readl(host, RESP0);
2100 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2101 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
2105 if (status & SDMMC_INT_RTO)
2107 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2110 cmd->error = -ETIMEDOUT;
2111 del_timer_sync(&host->dto_timer);
2112 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2113 del_timer_sync(&host->dto_timer);
2114 cmd->error = -EILSEQ;
2115 }else if (status & SDMMC_INT_RESP_ERR){
2116 del_timer_sync(&host->dto_timer);
2121 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2122 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2125 del_timer_sync(&host->dto_timer);
2126 if(MMC_SEND_STATUS != cmd->opcode)
2127 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2128 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2129 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2133 /* newer ip versions need a delay between retries */
2134 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
2140 static void dw_mci_tasklet_func(unsigned long priv)
2142 struct dw_mci *host = (struct dw_mci *)priv;
2143 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2144 struct mmc_data *data;
2145 struct mmc_command *cmd;
2146 enum dw_mci_state state;
2147 enum dw_mci_state prev_state;
2148 u32 status, cmd_flags;
2149 unsigned long timeout = 0;
2152 spin_lock(&host->lock);
2154 state = host->state;
2164 case STATE_SENDING_CMD:
2165 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2166 &host->pending_events))
2171 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2172 dw_mci_command_complete(host, cmd);
2173 if (cmd == host->mrq->sbc && !cmd->error) {
2174 prev_state = state = STATE_SENDING_CMD;
2175 __dw_mci_start_request(host, host->cur_slot,
2180 if (cmd->data && cmd->error) {
2181 del_timer_sync(&host->dto_timer); /* delete the timer for INT_DTO */
2182 dw_mci_stop_dma(host);
2185 send_stop_cmd(host, data);
2186 state = STATE_SENDING_STOP;
2192 send_stop_abort(host, data);
2193 state = STATE_SENDING_STOP;
2196 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2199 if (!host->mrq->data || cmd->error) {
2200 dw_mci_request_end(host, host->mrq);
2204 prev_state = state = STATE_SENDING_DATA;
2207 case STATE_SENDING_DATA:
2208 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2209 dw_mci_stop_dma(host);
2212 send_stop_cmd(host, data);
2214 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2215 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2216 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
2218 mci_writel(host, CMDARG, 0);
2220 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2221 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2223 if(host->mmc->hold_reg_flag)
2224 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2226 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
2228 timeout = jiffies + msecs_to_jiffies(500);
2231 ret = time_before(jiffies, timeout);
2232 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2236 MMC_DBG_ERR_FUNC(host->mmc,
2237 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2238 __func__, mmc_hostname(host->mmc));
2241 send_stop_abort(host, data);
2243 state = STATE_DATA_ERROR;
2247 MMC_DBG_CMD_FUNC(host->mmc,
2248 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2249 prev_state,state, mmc_hostname(host->mmc));
2251 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2252 &host->pending_events))
2254 MMC_DBG_INFO_FUNC(host->mmc,
2255 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2256 prev_state,state,mmc_hostname(host->mmc));
2258 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2259 prev_state = state = STATE_DATA_BUSY;
2262 case STATE_DATA_BUSY:
2263 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2264 &host->pending_events))
2267 dw_mci_deal_data_end(host, host->mrq);
2268 del_timer_sync(&host->dto_timer); //delete the timer for INT_DTO
2269 MMC_DBG_INFO_FUNC(host->mmc,
2270 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2271 prev_state,state,mmc_hostname(host->mmc));
2274 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2275 status = host->data_status;
2277 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2278 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2279 MMC_DBG_ERR_FUNC(host->mmc,
2280 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2281 prev_state,state, status, mmc_hostname(host->mmc));
2283 if (status & SDMMC_INT_DRTO) {
2284 data->error = -ETIMEDOUT;
2285 } else if (status & SDMMC_INT_DCRC) {
2286 data->error = -EILSEQ;
2287 } else if (status & SDMMC_INT_EBE &&
2288 host->dir_status == DW_MCI_SEND_STATUS){
2290 * No data CRC status was returned.
2291 * The number of bytes transferred will
2292 * be exaggerated in PIO mode.
2294 data->bytes_xfered = 0;
2295 data->error = -ETIMEDOUT;
2304 * After an error, there may be data lingering
2305 * in the FIFO, so reset it - doing so
2306 * generates a block interrupt, hence setting
2307 * the scatter-gather pointer to NULL.
2309 dw_mci_fifo_reset(host);
2311 data->bytes_xfered = data->blocks * data->blksz;
2316 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2317 prev_state,state,mmc_hostname(host->mmc));
2318 dw_mci_request_end(host, host->mrq);
2321 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2322 prev_state,state,mmc_hostname(host->mmc));
2324 if (host->mrq->sbc && !data->error) {
2325 data->stop->error = 0;
2327 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2328 prev_state,state,mmc_hostname(host->mmc));
2330 dw_mci_request_end(host, host->mrq);
2334 prev_state = state = STATE_SENDING_STOP;
2336 send_stop_cmd(host, data);
2338 if (data->stop && !data->error) {
2339 /* stop command for open-ended transfer*/
2341 send_stop_abort(host, data);
2345 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2346 prev_state,state,mmc_hostname(host->mmc));
2348 case STATE_SENDING_STOP:
2349 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2352 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2353 prev_state, state, mmc_hostname(host->mmc));
2355 /* CMD error in data command */
2356 if (host->mrq->cmd->error && host->mrq->data) {
2357 dw_mci_fifo_reset(host);
2363 dw_mci_command_complete(host, host->mrq->stop);
2365 if (host->mrq->stop)
2366 dw_mci_command_complete(host, host->mrq->stop);
2368 host->cmd_status = 0;
2371 dw_mci_request_end(host, host->mrq);
2374 case STATE_DATA_ERROR:
2375 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2376 &host->pending_events))
2379 state = STATE_DATA_BUSY;
2382 } while (state != prev_state);
2384 host->state = state;
2386 spin_unlock(&host->lock);
2390 /* push final bytes to part_buf, only use during push */
2391 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2393 memcpy((void *)&host->part_buf, buf, cnt);
2394 host->part_buf_count = cnt;
2397 /* append bytes to part_buf, only use during push */
2398 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2400 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2401 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2402 host->part_buf_count += cnt;
2406 /* pull first bytes from part_buf, only use during pull */
2407 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2409 cnt = min(cnt, (int)host->part_buf_count);
2411 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2413 host->part_buf_count -= cnt;
2414 host->part_buf_start += cnt;
2419 /* pull final bytes from the part_buf, assuming it's just been filled */
2420 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2422 memcpy(buf, &host->part_buf, cnt);
2423 host->part_buf_start = cnt;
2424 host->part_buf_count = (1 << host->data_shift) - cnt;
2427 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2429 struct mmc_data *data = host->data;
2432 /* try and push anything in the part_buf */
2433 if (unlikely(host->part_buf_count)) {
2434 int len = dw_mci_push_part_bytes(host, buf, cnt);
2437 if (host->part_buf_count == 2) {
2438 mci_writew(host, DATA(host->data_offset),
2440 host->part_buf_count = 0;
2443 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2444 if (unlikely((unsigned long)buf & 0x1)) {
2446 u16 aligned_buf[64];
2447 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2448 int items = len >> 1;
2450 /* memcpy from input buffer into aligned buffer */
2451 memcpy(aligned_buf, buf, len);
2454 /* push data from aligned buffer into fifo */
2455 for (i = 0; i < items; ++i)
2456 mci_writew(host, DATA(host->data_offset),
2463 for (; cnt >= 2; cnt -= 2)
2464 mci_writew(host, DATA(host->data_offset), *pdata++);
2467 /* put anything remaining in the part_buf */
2469 dw_mci_set_part_bytes(host, buf, cnt);
2470 /* Push data if we have reached the expected data length */
2471 if ((data->bytes_xfered + init_cnt) ==
2472 (data->blksz * data->blocks))
2473 mci_writew(host, DATA(host->data_offset),
2478 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2480 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2481 if (unlikely((unsigned long)buf & 0x1)) {
2483 /* pull data from fifo into aligned buffer */
2484 u16 aligned_buf[64];
2485 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2486 int items = len >> 1;
2488 for (i = 0; i < items; ++i)
2489 aligned_buf[i] = mci_readw(host,
2490 DATA(host->data_offset));
2491 /* memcpy from aligned buffer into output buffer */
2492 memcpy(buf, aligned_buf, len);
2500 for (; cnt >= 2; cnt -= 2)
2501 *pdata++ = mci_readw(host, DATA(host->data_offset));
2505 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2506 dw_mci_pull_final_bytes(host, buf, cnt);
2510 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2512 struct mmc_data *data = host->data;
2515 /* try and push anything in the part_buf */
2516 if (unlikely(host->part_buf_count)) {
2517 int len = dw_mci_push_part_bytes(host, buf, cnt);
2520 if (host->part_buf_count == 4) {
2521 mci_writel(host, DATA(host->data_offset),
2523 host->part_buf_count = 0;
2526 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2527 if (unlikely((unsigned long)buf & 0x3)) {
2529 u32 aligned_buf[32];
2530 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2531 int items = len >> 2;
2533 /* memcpy from input buffer into aligned buffer */
2534 memcpy(aligned_buf, buf, len);
2537 /* push data from aligned buffer into fifo */
2538 for (i = 0; i < items; ++i)
2539 mci_writel(host, DATA(host->data_offset),
2546 for (; cnt >= 4; cnt -= 4)
2547 mci_writel(host, DATA(host->data_offset), *pdata++);
2550 /* put anything remaining in the part_buf */
2552 dw_mci_set_part_bytes(host, buf, cnt);
2553 /* Push data if we have reached the expected data length */
2554 if ((data->bytes_xfered + init_cnt) ==
2555 (data->blksz * data->blocks))
2556 mci_writel(host, DATA(host->data_offset),
2561 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2563 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2564 if (unlikely((unsigned long)buf & 0x3)) {
2566 /* pull data from fifo into aligned buffer */
2567 u32 aligned_buf[32];
2568 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2569 int items = len >> 2;
2571 for (i = 0; i < items; ++i)
2572 aligned_buf[i] = mci_readl(host,
2573 DATA(host->data_offset));
2574 /* memcpy from aligned buffer into output buffer */
2575 memcpy(buf, aligned_buf, len);
2583 for (; cnt >= 4; cnt -= 4)
2584 *pdata++ = mci_readl(host, DATA(host->data_offset));
2588 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2589 dw_mci_pull_final_bytes(host, buf, cnt);
2593 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2595 struct mmc_data *data = host->data;
2598 /* try and push anything in the part_buf */
2599 if (unlikely(host->part_buf_count)) {
2600 int len = dw_mci_push_part_bytes(host, buf, cnt);
2604 if (host->part_buf_count == 8) {
2605 mci_writeq(host, DATA(host->data_offset),
2607 host->part_buf_count = 0;
2610 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2611 if (unlikely((unsigned long)buf & 0x7)) {
2613 u64 aligned_buf[16];
2614 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2615 int items = len >> 3;
2617 /* memcpy from input buffer into aligned buffer */
2618 memcpy(aligned_buf, buf, len);
2621 /* push data from aligned buffer into fifo */
2622 for (i = 0; i < items; ++i)
2623 mci_writeq(host, DATA(host->data_offset),
2630 for (; cnt >= 8; cnt -= 8)
2631 mci_writeq(host, DATA(host->data_offset), *pdata++);
2634 /* put anything remaining in the part_buf */
2636 dw_mci_set_part_bytes(host, buf, cnt);
2637 /* Push data if we have reached the expected data length */
2638 if ((data->bytes_xfered + init_cnt) ==
2639 (data->blksz * data->blocks))
2640 mci_writeq(host, DATA(host->data_offset),
2645 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2647 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2648 if (unlikely((unsigned long)buf & 0x7)) {
2650 /* pull data from fifo into aligned buffer */
2651 u64 aligned_buf[16];
2652 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2653 int items = len >> 3;
2655 for (i = 0; i < items; ++i)
2656 aligned_buf[i] = mci_readq(host,
2657 DATA(host->data_offset));
2658 /* memcpy from aligned buffer into output buffer */
2659 memcpy(buf, aligned_buf, len);
2667 for (; cnt >= 8; cnt -= 8)
2668 *pdata++ = mci_readq(host, DATA(host->data_offset));
2672 host->part_buf = mci_readq(host, DATA(host->data_offset));
2673 dw_mci_pull_final_bytes(host, buf, cnt);
2677 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2681 /* get remaining partial bytes */
2682 len = dw_mci_pull_part_bytes(host, buf, cnt);
2683 if (unlikely(len == cnt))
2688 /* get the rest of the data */
2689 host->pull_data(host, buf, cnt);
2692 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2694 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2696 unsigned int offset;
2697 struct mmc_data *data = host->data;
2698 int shift = host->data_shift;
2701 unsigned int remain, fcnt;
2703 if(!host->mmc->bus_refs){
2704 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2708 if (!sg_miter_next(sg_miter))
2711 host->sg = sg_miter->piter.sg;
2712 buf = sg_miter->addr;
2713 remain = sg_miter->length;
2717 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2718 << shift) + host->part_buf_count;
2719 len = min(remain, fcnt);
2722 dw_mci_pull_data(host, (void *)(buf + offset), len);
2723 data->bytes_xfered += len;
2728 sg_miter->consumed = offset;
2729 status = mci_readl(host, MINTSTS);
2730 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2731 /* if the RXDR is ready read again */
2732 } while ((status & SDMMC_INT_RXDR) ||
2733 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2736 if (!sg_miter_next(sg_miter))
2738 sg_miter->consumed = 0;
2740 sg_miter_stop(sg_miter);
2744 sg_miter_stop(sg_miter);
2748 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2751 static void dw_mci_write_data_pio(struct dw_mci *host)
2753 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2755 unsigned int offset;
2756 struct mmc_data *data = host->data;
2757 int shift = host->data_shift;
2760 unsigned int fifo_depth = host->fifo_depth;
2761 unsigned int remain, fcnt;
2763 if(!host->mmc->bus_refs){
2764 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2769 if (!sg_miter_next(sg_miter))
2772 host->sg = sg_miter->piter.sg;
2773 buf = sg_miter->addr;
2774 remain = sg_miter->length;
2778 fcnt = ((fifo_depth -
2779 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2780 << shift) - host->part_buf_count;
2781 len = min(remain, fcnt);
2784 host->push_data(host, (void *)(buf + offset), len);
2785 data->bytes_xfered += len;
2790 sg_miter->consumed = offset;
2791 status = mci_readl(host, MINTSTS);
2792 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2793 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2796 if (!sg_miter_next(sg_miter))
2798 sg_miter->consumed = 0;
2800 sg_miter_stop(sg_miter);
2804 sg_miter_stop(sg_miter);
2808 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2811 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2813 u32 multi, unit = SZ_2M;
2815 if (!host->cmd_status)
2816 host->cmd_status = status;
2821 if ((MMC_STOP_TRANSMISSION != host->cmd->opcode)) {
2822 multi = (mci_readl(host, BYTCNT) / unit) +
2823 ((mci_readl(host, BYTCNT) % unit) ? 1 :0 ) +
2824 ((host->cmd->retries > 2) ? 2 : host->cmd->retries);
2825 /* Max limit time: 8s for dto */
2826 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(4000 * multi));
2831 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2832 tasklet_schedule(&host->tasklet);
2835 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2837 struct dw_mci *host = dev_id;
2838 u32 pending, sdio_int;
2841 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2844 * DTO fix - version 2.10a and below, and only if internal DMA
2847 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2849 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2850 pending |= SDMMC_INT_DATA_OVER;
2854 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2855 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2856 host->cmd_status = pending;
2858 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2859 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2861 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2864 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2865 /* if there is an error report DATA_ERROR */
2866 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2867 host->data_status = pending;
2869 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2871 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2872 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2873 tasklet_schedule(&host->tasklet);
2876 if (pending & SDMMC_INT_DATA_OVER) {
2877 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2878 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2879 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2880 if (!host->data_status)
2881 host->data_status = pending;
2883 if (host->dir_status == DW_MCI_RECV_STATUS) {
2884 if (host->sg != NULL)
2885 dw_mci_read_data_pio(host, true);
2887 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2888 tasklet_schedule(&host->tasklet);
2891 if (pending & SDMMC_INT_RXDR) {
2892 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2893 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2894 dw_mci_read_data_pio(host, false);
2897 if (pending & SDMMC_INT_TXDR) {
2898 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2899 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2900 dw_mci_write_data_pio(host);
2903 if (pending & SDMMC_INT_VSI) {
2904 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2905 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2906 dw_mci_cmd_interrupt(host, pending);
2909 if (pending & SDMMC_INT_CMD_DONE) {
2910 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2911 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2912 dw_mci_cmd_interrupt(host, pending);
2915 if (pending & SDMMC_INT_CD) {
2916 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2917 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
2918 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2919 queue_work(host->card_workqueue, &host->card_work);
2922 if (pending & SDMMC_INT_HLE) {
2923 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2924 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2928 /* Handle SDIO Interrupts */
2929 for (i = 0; i < host->num_slots; i++) {
2930 struct dw_mci_slot *slot = host->slot[i];
2932 if (host->verid < DW_MMC_240A)
2933 sdio_int = SDMMC_INT_SDIO(i);
2935 sdio_int = SDMMC_INT_SDIO(i + 8);
2937 if (pending & sdio_int) {
2938 mci_writel(host, RINTSTS, sdio_int);
2939 mmc_signal_sdio_irq(slot->mmc);
2945 #ifdef CONFIG_MMC_DW_IDMAC
2946 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2947 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2948 /* Handle DMA interrupts */
2949 pending = mci_readl(host, IDSTS);
2950 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2951 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2952 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2953 host->dma_ops->complete((void *)host);
2961 static void dw_mci_work_routine_card(struct work_struct *work)
2963 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2966 for (i = 0; i < host->num_slots; i++) {
2967 struct dw_mci_slot *slot = host->slot[i];
2968 struct mmc_host *mmc = slot->mmc;
2969 struct mmc_request *mrq;
2972 present = dw_mci_get_cd(mmc);
2974 /* Card insert, switch data line to uart function, and vice verse.
2975 * ONLY audi chip need switched by software, using udbg tag in dts!
2977 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
2979 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
2980 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
2981 mmc_hostname(host->mmc));
2983 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
2984 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
2985 mmc_hostname(host->mmc));
2989 while (present != slot->last_detect_state) {
2990 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2991 present ? "inserted" : "removed");
2992 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
2993 present ? "inserted" : "removed.", mmc_hostname(mmc));
2995 dw_mci_ctrl_all_reset(host);
2996 /* Stop edma when rountine card triggered */
2997 if(cpu_is_rk3036() || cpu_is_rk312x())
2998 if(host->dma_ops && host->dma_ops->stop)
2999 host->dma_ops->stop(host);
3000 rk_send_wakeup_key();//wake up system
3001 spin_lock_bh(&host->lock);
3003 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
3004 /* Card change detected */
3005 slot->last_detect_state = present;
3007 /* Clean up queue if present */
3010 if (mrq == host->mrq) {
3014 switch (host->state) {
3017 case STATE_SENDING_CMD:
3018 mrq->cmd->error = -ENOMEDIUM;
3022 case STATE_SENDING_DATA:
3023 mrq->data->error = -ENOMEDIUM;
3024 dw_mci_stop_dma(host);
3026 case STATE_DATA_BUSY:
3027 case STATE_DATA_ERROR:
3028 if (mrq->data->error == -EINPROGRESS)
3029 mrq->data->error = -ENOMEDIUM;
3033 case STATE_SENDING_STOP:
3034 mrq->stop->error = -ENOMEDIUM;
3038 dw_mci_request_end(host, mrq);
3040 list_del(&slot->queue_node);
3041 mrq->cmd->error = -ENOMEDIUM;
3043 mrq->data->error = -ENOMEDIUM;
3045 mrq->stop->error = -ENOMEDIUM;
3047 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3048 mrq->cmd->opcode, mmc_hostname(mmc));
3050 spin_unlock(&host->lock);
3051 mmc_request_done(slot->mmc, mrq);
3052 spin_lock(&host->lock);
3056 /* Power down slot */
3058 /* Clear down the FIFO */
3059 dw_mci_fifo_reset(host);
3060 #ifdef CONFIG_MMC_DW_IDMAC
3061 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3062 dw_mci_idmac_reset(host);
3067 spin_unlock_bh(&host->lock);
3069 present = dw_mci_get_cd(mmc);
3072 mmc_detect_change(slot->mmc,
3073 msecs_to_jiffies(host->pdata->detect_delay_ms));
3078 /* given a slot id, find out the device node representing that slot */
3079 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3081 struct device_node *np;
3085 if (!dev || !dev->of_node)
3088 for_each_child_of_node(dev->of_node, np) {
3089 addr = of_get_property(np, "reg", &len);
3090 if (!addr || (len < sizeof(int)))
3092 if (be32_to_cpup(addr) == slot)
3098 static struct dw_mci_of_slot_quirks {
3101 } of_slot_quirks[] = {
3103 .quirk = "disable-wp",
3104 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
3108 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3110 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3115 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3116 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3117 quirks |= of_slot_quirks[idx].id;
3122 /* find out bus-width for a given slot */
3123 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3125 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3131 if (of_property_read_u32(np, "bus-width", &bus_wd))
3132 dev_err(dev, "bus-width property not found, assuming width"
3138 /* find the pwr-en gpio for a given slot; or -1 if none specified */
3139 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3141 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3147 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3149 /* Having a missing entry is valid; return silently */
3150 if (!gpio_is_valid(gpio))
3153 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3154 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3158 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3164 /* find the write protect gpio for a given slot; or -1 if none specified */
3165 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3167 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3173 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3175 /* Having a missing entry is valid; return silently */
3176 if (!gpio_is_valid(gpio))
3179 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3180 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3187 /* find the cd gpio for a given slot */
3188 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3189 struct mmc_host *mmc)
3191 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3197 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3199 /* Having a missing entry is valid; return silently */
3200 if (!gpio_is_valid(gpio))
3203 if (mmc_gpio_request_cd(mmc, gpio, 0))
3204 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3207 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3209 struct mmc_host *mmc = dev_id;
3210 struct dw_mci_slot *slot = mmc_priv(mmc);
3211 struct dw_mci *host = slot->host;
3213 /* wakeup system whether gpio debounce or not */
3214 rk_send_wakeup_key();
3216 /* no need to trigger detect flow when rescan is disabled.
3217 This case happended in dpm, that we just wakeup system and
3218 let suspend_post notify callback handle it.
3220 if(mmc->rescan_disable == 0)
3221 queue_work(host->card_workqueue, &host->card_work);
3223 printk("%s: rescan been disabled!\n", __FUNCTION__);
3228 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3229 struct mmc_host *mmc)
3231 struct dw_mci_slot *slot = mmc_priv(mmc);
3232 struct dw_mci *host = slot->host;
3236 /* Having a missing entry is valid; return silently */
3237 if (!gpio_is_valid(gpio))
3240 irq = gpio_to_irq(gpio);
3242 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3243 NULL, dw_mci_gpio_cd_irqt,
3244 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
3248 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3250 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3251 enable_irq_wake(irq);
3254 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
3258 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3259 struct mmc_host *mmc)
3261 if (!gpio_is_valid(gpio))
3264 if (gpio_to_irq(gpio) >= 0) {
3265 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3266 devm_gpio_free(&mmc->class_dev, gpio);
3269 #else /* CONFIG_OF */
3270 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3274 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3278 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3282 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3286 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3287 struct mmc_host *mmc)
3291 #endif /* CONFIG_OF */
3293 /* @host: dw_mci host prvdata
3294 * Init pinctrl for each platform. Usually we assign
3295 * "defalut" tag for functional usage, "idle" tag for gpio
3296 * state and "udbg" tag for uart_dbg if any.
3298 static void dw_mci_init_pinctrl(struct dw_mci *host)
3300 /* Fixme: DON'T TOUCH EMMC SETTING! */
3301 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3304 /* Get pinctrl for DTS */
3305 host->pinctrl = devm_pinctrl_get(host->dev);
3306 if (IS_ERR(host->pinctrl)) {
3307 dev_err(host->dev, "%s: No pinctrl used!\n",
3308 mmc_hostname(host->mmc));
3312 /* Lookup idle state */
3313 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3314 PINCTRL_STATE_IDLE);
3315 if (IS_ERR(host->pins_idle)) {
3316 dev_err(host->dev, "%s: No idle tag found!\n",
3317 mmc_hostname(host->mmc));
3319 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3320 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3321 mmc_hostname(host->mmc));
3324 /* Lookup default state */
3325 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3326 PINCTRL_STATE_DEFAULT);
3327 if (IS_ERR(host->pins_default)) {
3328 dev_err(host->dev, "%s: No default pinctrl found!\n",
3329 mmc_hostname(host->mmc));
3331 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3332 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3333 mmc_hostname(host->mmc));
3336 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3337 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3338 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3339 if (IS_ERR(host->pins_udbg)) {
3340 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3341 mmc_hostname(host->mmc));
3343 if (!dw_mci_get_cd(host->mmc))
3344 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3345 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3346 mmc_hostname(host->mmc));
3351 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3352 unsigned long mode, void *unused)
3354 struct mmc_host *host = container_of(
3355 notify_block, struct mmc_host, pm_notify);
3356 unsigned long flags;
3359 case PM_HIBERNATION_PREPARE:
3360 case PM_SUSPEND_PREPARE:
3361 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3362 spin_lock_irqsave(&host->lock, flags);
3363 host->rescan_disable = 1;
3364 spin_unlock_irqrestore(&host->lock, flags);
3365 if (cancel_delayed_work(&host->detect))
3366 wake_unlock(&host->detect_wake_lock);
3369 case PM_POST_SUSPEND:
3370 case PM_POST_HIBERNATION:
3371 case PM_POST_RESTORE:
3372 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3373 spin_lock_irqsave(&host->lock, flags);
3374 host->rescan_disable = 0;
3375 spin_unlock_irqrestore(&host->lock, flags);
3376 mmc_detect_change(host, 10);
3382 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3384 struct mmc_host *mmc;
3385 struct dw_mci_slot *slot;
3386 const struct dw_mci_drv_data *drv_data = host->drv_data;
3391 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3395 slot = mmc_priv(mmc);
3399 host->slot[id] = slot;
3402 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3404 mmc->ops = &dw_mci_ops;
3406 if (of_property_read_u32_array(host->dev->of_node,
3407 "clock-freq-min-max", freq, 2)) {
3408 mmc->f_min = DW_MCI_FREQ_MIN;
3409 mmc->f_max = DW_MCI_FREQ_MAX;
3411 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3412 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3414 mmc->f_min = freq[0];
3415 mmc->f_max = freq[1];
3417 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3418 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3421 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
3423 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3424 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3425 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3426 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3427 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3428 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3430 if (of_find_property(host->dev->of_node, "supports-tSD", NULL))
3431 mmc->restrict_caps |= RESTRICT_CARD_TYPE_TSD;
3433 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3434 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3435 if (register_pm_notifier(&mmc->pm_notify)) {
3436 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3437 goto err_pm_notifier;
3441 /* We assume only low-level chip use gpio_cd */
3442 if ((soc_is_rk3126() || soc_is_rk3126b()) &&
3443 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3444 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3445 if (gpio_is_valid(slot->cd_gpio)) {
3446 /* Request gpio int for card detection */
3447 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3449 slot->cd_gpio = -ENODEV;
3450 dev_err(host->dev, "failed to get your cd-gpios!\n");
3454 if (host->pdata->get_ocr)
3455 mmc->ocr_avail = host->pdata->get_ocr(id);
3458 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3459 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3460 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3461 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3465 * Start with slot power disabled, it will be enabled when a card
3468 if (host->pdata->setpower)
3469 host->pdata->setpower(id, 0);
3471 if (host->pdata->caps)
3472 mmc->caps = host->pdata->caps;
3474 if (host->pdata->pm_caps)
3475 mmc->pm_caps = host->pdata->pm_caps;
3477 if (host->dev->of_node) {
3478 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3482 ctrl_id = to_platform_device(host->dev)->id;
3484 if (drv_data && drv_data->caps)
3485 mmc->caps |= drv_data->caps[ctrl_id];
3486 if (drv_data && drv_data->hold_reg_flag)
3487 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3489 /* set the compatibility of driver. */
3490 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3491 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3493 if (host->pdata->caps2)
3494 mmc->caps2 = host->pdata->caps2;
3496 if (host->pdata->get_bus_wd)
3497 bus_width = host->pdata->get_bus_wd(slot->id);
3498 else if (host->dev->of_node)
3499 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3503 switch (bus_width) {
3505 mmc->caps |= MMC_CAP_8_BIT_DATA;
3507 mmc->caps |= MMC_CAP_4_BIT_DATA;
3510 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3511 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3512 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3513 mmc->caps |= MMC_CAP_SDIO_IRQ;
3514 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3515 mmc->caps |= MMC_CAP_HW_RESET;
3516 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3517 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3518 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3519 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3520 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3521 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3522 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3523 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3525 /*Assign pm_caps pass to pm_flags*/
3526 mmc->pm_flags = mmc->pm_caps;
3528 if (host->pdata->blk_settings) {
3529 mmc->max_segs = host->pdata->blk_settings->max_segs;
3530 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3531 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3532 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3533 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3535 /* Useful defaults if platform data is unset. */
3536 #ifdef CONFIG_MMC_DW_IDMAC
3537 mmc->max_segs = host->ring_size;
3538 mmc->max_blk_size = 65536;
3539 mmc->max_blk_count = host->ring_size;
3540 mmc->max_seg_size = 0x1000;
3541 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3542 if(cpu_is_rk3036() || cpu_is_rk312x()){
3543 /* fixup for external dmac setting */
3545 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3546 mmc->max_blk_count = 65535;
3547 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3548 mmc->max_seg_size = mmc->max_req_size;
3552 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3553 mmc->max_blk_count = 512;
3554 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3555 mmc->max_seg_size = mmc->max_req_size;
3556 #endif /* CONFIG_MMC_DW_IDMAC */
3560 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3562 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
3567 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3568 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3572 if (IS_ERR(host->vmmc)) {
3573 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3576 ret = regulator_enable(host->vmmc);
3579 "failed to enable regulator: %d\n", ret);
3586 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3588 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3589 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3591 dw_mci_init_pinctrl(host);
3592 ret = mmc_add_host(mmc);
3596 #if defined(CONFIG_DEBUG_FS)
3597 dw_mci_init_debugfs(slot);
3600 /* Card initially undetected */
3601 slot->last_detect_state = 1;
3605 unregister_pm_notifier(&mmc->pm_notify);
3608 if (gpio_is_valid(slot->cd_gpio))
3609 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3614 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3616 /* Shutdown detect IRQ */
3617 if (slot->host->pdata->exit)
3618 slot->host->pdata->exit(id);
3620 /* Debugfs stuff is cleaned up by mmc core */
3621 mmc_remove_host(slot->mmc);
3622 slot->host->slot[id] = NULL;
3623 mmc_free_host(slot->mmc);
3626 static void dw_mci_init_dma(struct dw_mci *host)
3628 /* Alloc memory for sg translation */
3629 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3630 &host->sg_dma, GFP_KERNEL);
3631 if (!host->sg_cpu) {
3632 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3637 /* Determine which DMA interface to use */
3638 #if defined(CONFIG_MMC_DW_IDMAC)
3639 if(cpu_is_rk3036() || cpu_is_rk312x()){
3640 host->dma_ops = &dw_mci_edmac_ops;
3641 dev_info(host->dev, "Using external DMA controller.\n");
3643 host->dma_ops = &dw_mci_idmac_ops;
3644 dev_info(host->dev, "Using internal DMA controller.\n");
3651 if (host->dma_ops->init && host->dma_ops->start &&
3652 host->dma_ops->stop && host->dma_ops->cleanup) {
3653 if (host->dma_ops->init(host)) {
3654 dev_err(host->dev, "%s: Unable to initialize "
3655 "DMA Controller.\n", __func__);
3659 dev_err(host->dev, "DMA initialization not found.\n");
3667 dev_info(host->dev, "Using PIO mode.\n");
3672 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3674 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3677 ctrl = mci_readl(host, CTRL);
3679 mci_writel(host, CTRL, ctrl);
3681 /* wait till resets clear */
3683 ctrl = mci_readl(host, CTRL);
3684 if (!(ctrl & reset))
3686 } while (time_before(jiffies, timeout));
3689 "Timeout resetting block (ctrl reset %#x)\n",
3695 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3698 * Reseting generates a block interrupt, hence setting
3699 * the scatter-gather pointer to NULL.
3702 sg_miter_stop(&host->sg_miter);
3706 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
3709 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3711 return dw_mci_ctrl_reset(host,
3712 SDMMC_CTRL_FIFO_RESET |
3714 SDMMC_CTRL_DMA_RESET);
3719 static struct dw_mci_of_quirks {
3724 .quirk = "broken-cd",
3725 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
3729 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3731 struct dw_mci_board *pdata;
3732 struct device *dev = host->dev;
3733 struct device_node *np = dev->of_node;
3734 const struct dw_mci_drv_data *drv_data = host->drv_data;
3736 u32 clock_frequency;
3738 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3740 dev_err(dev, "could not allocate memory for pdata\n");
3741 return ERR_PTR(-ENOMEM);
3744 /* find out number of slots supported */
3745 if (of_property_read_u32(dev->of_node, "num-slots",
3746 &pdata->num_slots)) {
3747 dev_info(dev, "num-slots property not found, "
3748 "assuming 1 slot is available\n");
3749 pdata->num_slots = 1;
3753 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3754 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3755 pdata->quirks |= of_quirks[idx].id;
3758 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3759 dev_info(dev, "fifo-depth property not found, using "
3760 "value of FIFOTH register as default\n");
3762 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3764 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3765 pdata->bus_hz = clock_frequency;
3767 if (drv_data && drv_data->parse_dt) {
3768 ret = drv_data->parse_dt(host);
3770 return ERR_PTR(ret);
3773 if (of_find_property(np, "keep-power-in-suspend", NULL))
3774 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3776 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3777 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3779 if (of_find_property(np, "supports-highspeed", NULL))
3780 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3782 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3783 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3785 if (of_find_property(np, "supports-DDR_MODE", NULL))
3786 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3788 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3789 pdata->caps2 |= MMC_CAP2_HS200;
3791 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3792 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3794 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3795 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3797 if (of_get_property(np, "cd-inverted", NULL))
3798 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3799 if (of_get_property(np, "bootpart-no-access", NULL))
3800 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3805 #else /* CONFIG_OF */
3806 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3808 return ERR_PTR(-EINVAL);
3810 #endif /* CONFIG_OF */
3812 static void dw_mci_dealwith_timeout(struct dw_mci *host)
3817 dev_err(host->dev, "host->state = 0x%x\n", host->state);
3818 switch(host->state){
3821 case STATE_SENDING_DATA:
3822 case STATE_DATA_BUSY:
3823 host->data_status |= (SDMMC_INT_DCRC|SDMMC_INT_EBE);
3824 mci_writel(host, RINTSTS, SDMMC_INT_DRTO); // clear interrupt
3825 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3826 host->state = STATE_DATA_BUSY;
3827 if (!dw_mci_ctrl_all_reset(host)) {
3828 dev_err(host->dev, "dto: ctrl_all_reset failed!\n");
3832 /* NO requirement to reclaim slave chn using external dmac */
3833 #ifdef CONFIG_MMC_DW_IDMAC
3834 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3835 if (host->use_dma && host->dma_ops->init)
3836 host->dma_ops->init(host);
3840 * Restore the initial value at FIFOTH register
3841 * And Invalidate the prev_blksz with zero
3843 mci_writel(host, FIFOTH, host->fifoth_val);
3844 host->prev_blksz = 0;
3845 mci_writel(host, TMOUT, 0xFFFFFFFF);
3846 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3847 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
3848 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
3849 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3850 regs |= SDMMC_INT_CD;
3852 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)) {
3853 if (host->verid < DW_MMC_240A)
3854 sdio_int = SDMMC_INT_SDIO(0);
3856 sdio_int = SDMMC_INT_SDIO(8);
3858 if (mci_readl(host, INTMASK) & sdio_int)
3862 mci_writel(host, INTMASK, regs);
3863 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3864 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3865 tasklet_schedule(&host->tasklet);
3871 static void dw_mci_dto_timeout(unsigned long host_data)
3873 struct dw_mci *host = (struct dw_mci *) host_data;
3875 disable_irq(host->irq);
3877 dev_err(host->dev, "data_over interrupt timeout!\n");
3878 host->data_status = SDMMC_INT_EBE;
3879 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3880 dw_mci_dealwith_timeout(host);
3882 enable_irq(host->irq);
3885 int dw_mci_probe(struct dw_mci *host)
3887 const struct dw_mci_drv_data *drv_data = host->drv_data;
3888 int width, i, ret = 0;
3894 host->pdata = dw_mci_parse_dt(host);
3895 if (IS_ERR(host->pdata)) {
3896 dev_err(host->dev, "platform data not available\n");
3901 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3903 "Platform data must supply select_slot function\n");
3908 * In 2.40a spec, Data offset is changed.
3909 * Need to check the version-id and set data-offset for DATA register.
3911 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3912 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3914 if (host->verid < DW_MMC_240A)
3915 host->data_offset = DATA_OFFSET;
3917 host->data_offset = DATA_240A_OFFSET;
3920 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3921 if (IS_ERR(host->hclk_mmc)) {
3922 dev_err(host->dev, "failed to get hclk_mmc\n");
3923 ret = PTR_ERR(host->hclk_mmc);
3927 clk_prepare_enable(host->hclk_mmc);
3930 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3931 if (IS_ERR(host->clk_mmc)) {
3932 dev_err(host->dev, "failed to get clk mmc_per\n");
3933 ret = PTR_ERR(host->clk_mmc);
3937 host->bus_hz = host->pdata->bus_hz;
3938 if (!host->bus_hz) {
3939 dev_err(host->dev,"Platform data must supply bus speed\n");
3944 if (host->verid < DW_MMC_240A)
3945 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3947 //rockchip: fix divider 2 in clksum before controlller
3948 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3951 dev_err(host->dev, "failed to set clk mmc\n");
3954 clk_prepare_enable(host->clk_mmc);
3956 if (drv_data && drv_data->setup_clock) {
3957 ret = drv_data->setup_clock(host);
3960 "implementation specific clock setup failed\n");
3965 host->quirks = host->pdata->quirks;
3966 host->irq_state = true;
3967 host->set_speed = 0;
3969 host->svi_flags = 0;
3971 spin_lock_init(&host->lock);
3972 spin_lock_init(&host->slock);
3974 INIT_LIST_HEAD(&host->queue);
3976 * Get the host data width - this assumes that HCON has been set with
3977 * the correct values.
3979 i = (mci_readl(host, HCON) >> 7) & 0x7;
3981 host->push_data = dw_mci_push_data16;
3982 host->pull_data = dw_mci_pull_data16;
3984 host->data_shift = 1;
3985 } else if (i == 2) {
3986 host->push_data = dw_mci_push_data64;
3987 host->pull_data = dw_mci_pull_data64;
3989 host->data_shift = 3;
3991 /* Check for a reserved value, and warn if it is */
3993 "HCON reports a reserved host data width!\n"
3994 "Defaulting to 32-bit access.\n");
3995 host->push_data = dw_mci_push_data32;
3996 host->pull_data = dw_mci_pull_data32;
3998 host->data_shift = 2;
4001 /* Reset all blocks */
4002 if (!dw_mci_ctrl_all_reset(host))
4005 host->dma_ops = host->pdata->dma_ops;
4006 dw_mci_init_dma(host);
4008 /* Clear the interrupts for the host controller */
4009 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4010 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4012 /* Put in max timeout */
4013 mci_writel(host, TMOUT, 0xFFFFFFFF);
4016 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
4017 * Tx Mark = fifo_size / 2 DMA Size = 8
4019 if (!host->pdata->fifo_depth) {
4021 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
4022 * have been overwritten by the bootloader, just like we're
4023 * about to do, so if you know the value for your hardware, you
4024 * should put it in the platform data.
4026 fifo_size = mci_readl(host, FIFOTH);
4027 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
4029 fifo_size = host->pdata->fifo_depth;
4031 host->fifo_depth = fifo_size;
4033 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
4034 mci_writel(host, FIFOTH, host->fifoth_val);
4036 /* disable clock to CIU */
4037 mci_writel(host, CLKENA, 0);
4038 mci_writel(host, CLKSRC, 0);
4040 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
4041 host->card_workqueue = alloc_workqueue("dw-mci-card",
4042 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
4043 if (!host->card_workqueue) {
4047 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
4048 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
4049 host->irq_flags, "dw-mci", host);
4053 if (host->pdata->num_slots)
4054 host->num_slots = host->pdata->num_slots;
4056 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
4058 setup_timer(&host->dto_timer, dw_mci_dto_timeout, (unsigned long)host);
4059 /* We need at least one slot to succeed */
4060 for (i = 0; i < host->num_slots; i++) {
4061 ret = dw_mci_init_slot(host, i);
4063 dev_dbg(host->dev, "slot %d init failed\n", i);
4069 * Enable interrupts for command done, data over, data empty, card det,
4070 * receive ready and error such as transmit, receive timeout, crc error
4072 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4073 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
4074 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
4075 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
4076 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
4077 regs |= SDMMC_INT_CD;
4079 mci_writel(host, INTMASK, regs);
4081 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4083 dev_info(host->dev, "DW MMC controller at irq %d, "
4084 "%d bit host data width, "
4086 host->irq, width, fifo_size);
4089 dev_info(host->dev, "%d slots initialized\n", init_slots);
4091 dev_dbg(host->dev, "attempted to initialize %d slots, "
4092 "but failed on all\n", host->num_slots);
4097 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4098 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
4103 destroy_workqueue(host->card_workqueue);
4106 if (host->use_dma && host->dma_ops->exit)
4107 host->dma_ops->exit(host);
4110 regulator_disable(host->vmmc);
4111 regulator_put(host->vmmc);
4115 if (!IS_ERR(host->clk_mmc))
4116 clk_disable_unprepare(host->clk_mmc);
4118 if (!IS_ERR(host->hclk_mmc))
4119 clk_disable_unprepare(host->hclk_mmc);
4123 EXPORT_SYMBOL(dw_mci_probe);
4125 void dw_mci_remove(struct dw_mci *host)
4127 struct mmc_host *mmc = host->mmc;
4128 struct dw_mci_slot *slot = mmc_priv(mmc);
4131 del_timer_sync(&host->dto_timer);
4133 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4134 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4136 for(i = 0; i < host->num_slots; i++){
4137 dev_dbg(host->dev, "remove slot %d\n", i);
4139 dw_mci_cleanup_slot(host->slot[i], i);
4142 /* disable clock to CIU */
4143 mci_writel(host, CLKENA, 0);
4144 mci_writel(host, CLKSRC, 0);
4146 destroy_workqueue(host->card_workqueue);
4147 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4148 unregister_pm_notifier(&host->mmc->pm_notify);
4150 if(host->use_dma && host->dma_ops->exit)
4151 host->dma_ops->exit(host);
4153 if (gpio_is_valid(slot->cd_gpio))
4154 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4157 regulator_disable(host->vmmc);
4158 regulator_put(host->vmmc);
4160 if(!IS_ERR(host->clk_mmc))
4161 clk_disable_unprepare(host->clk_mmc);
4163 if(!IS_ERR(host->hclk_mmc))
4164 clk_disable_unprepare(host->hclk_mmc);
4166 EXPORT_SYMBOL(dw_mci_remove);
4170 #ifdef CONFIG_PM_SLEEP
4172 * TODO: we should probably disable the clock to the card in the suspend path.
4174 extern int get_wifi_chip_type(void);
4175 int dw_mci_suspend(struct dw_mci *host)
4177 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4178 (get_wifi_chip_type() == WIFI_ESP8089 || get_wifi_chip_type() == WIFI_RTKWIFI))
4182 regulator_disable(host->vmmc);
4184 /*only for sdmmc controller*/
4185 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4186 disable_irq(host->irq);
4187 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4188 MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
4189 mmc_hostname(host->mmc));
4191 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4192 mci_writel(host, INTMASK, 0x00);
4193 mci_writel(host, CTRL, 0x00);
4195 /* Soc rk3126 already in gpio_cd mode */
4196 if (!soc_is_rk3126() && !soc_is_rk3126b()) {
4197 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4198 enable_irq_wake(host->mmc->slot.cd_irq);
4203 EXPORT_SYMBOL(dw_mci_suspend);
4205 int dw_mci_resume(struct dw_mci *host)
4207 int i, ret, retry_cnt = 0;
4209 struct dw_mci_slot *slot;
4211 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4212 (get_wifi_chip_type() == WIFI_ESP8089 || get_wifi_chip_type() == WIFI_RTKWIFI))
4217 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
4218 slot = mmc_priv(host->mmc);
4219 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4223 /*only for sdmmc controller*/
4224 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4225 /* Soc rk3126 already in gpio_cd mode */
4226 if (!soc_is_rk3126() && !soc_is_rk3126b()) {
4227 disable_irq_wake(host->mmc->slot.cd_irq);
4228 mmc_gpio_free_cd(host->mmc);
4230 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4231 MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
4232 mmc_hostname(host->mmc));
4236 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4237 else if(cpu_is_rk3036())
4238 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4239 else if(cpu_is_rk312x())
4240 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4241 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4244 ret = regulator_enable(host->vmmc);
4247 "failed to enable regulator: %d\n", ret);
4252 if(!dw_mci_ctrl_all_reset(host)){
4257 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
4258 if(host->use_dma && host->dma_ops->init)
4259 host->dma_ops->init(host);
4262 * Restore the initial value at FIFOTH register
4263 * And Invalidate the prev_blksz with zero
4265 mci_writel(host, FIFOTH, host->fifoth_val);
4266 host->prev_blksz = 0;
4267 /* Put in max timeout */
4268 mci_writel(host, TMOUT, 0xFFFFFFFF);
4270 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4271 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
4273 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4274 regs |= SDMMC_INT_CD;
4275 mci_writel(host, INTMASK, regs);
4276 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4277 /*only for sdmmc controller*/
4278 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
4279 enable_irq(host->irq);
4282 for(i = 0; i < host->num_slots; i++){
4283 struct dw_mci_slot *slot = host->slot[i];
4286 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4287 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4288 dw_mci_setup_bus(slot, true);
4294 EXPORT_SYMBOL(dw_mci_resume);
4295 #endif /* CONFIG_PM_SLEEP */
4297 static int __init dw_mci_init(void)
4299 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4303 static void __exit dw_mci_exit(void)
4307 module_init(dw_mci_init);
4308 module_exit(dw_mci_exit);
4310 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4311 MODULE_AUTHOR("NXP Semiconductor VietNam");
4312 MODULE_AUTHOR("Imagination Technologies Ltd");
4313 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4314 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4315 MODULE_LICENSE("GPL v2");