2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
49 #include <linux/mfd/syscon.h>
50 #include <linux/regmap.h>
52 #include "rk_sdmmc_dbg.h"
53 #include <linux/regulator/rockchip_io_vol_domain.h>
54 #include "../../clk/rockchip/clk-ops.h"
/* Version string for this Rockchip-specific fork of the dw_mmc driver. */
56 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
58 /* Common flag combinations */
59 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
60 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
62 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
64 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
65 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
/* Direction markers stored in host->dir_status by dw_mci_submit_data(). */
66 #define DW_MCI_SEND_STATUS 1
67 #define DW_MCI_RECV_STATUS 2
/* Transfers shorter than this many bytes use PIO instead of DMA. */
68 #define DW_MCI_DMA_THRESHOLD 16
/* NOTE(review): max frequency capped to 50 MHz here (original 200 MHz commented out). */
70 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
71 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
73 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
/* Busy-wait timeout budgets in milliseconds, chosen per card type. */
74 #define SDMMC_DATA_TIMEOUT_SD 500
75 #define SDMMC_DATA_TIMEOUT_SDIO 250
76 #define SDMMC_DATA_TIMEOUT_EMMC 2500
78 #define SDMMC_CMD_RTO_MAX_HOLD 200
79 #define SDMMC_WAIT_FOR_UNBUSY 2500
81 #ifdef CONFIG_MMC_DW_IDMAC
/* All internal-DMAC interrupt status bits, used to ack/clear IDSTS. */
82 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
83 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
84 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
/* Hardware descriptor layout for the 32-bit internal DMAC ring. */
88 u32 des0; /* Control Descriptor */
89 #define IDMAC_DES0_DIC BIT(1)
90 #define IDMAC_DES0_LD BIT(2)
91 #define IDMAC_DES0_FD BIT(3)
92 #define IDMAC_DES0_CH BIT(4)
93 #define IDMAC_DES0_ER BIT(5)
94 #define IDMAC_DES0_CES BIT(30)
95 #define IDMAC_DES0_OWN BIT(31)
97 u32 des1; /* Buffer sizes */
/* Buffer-1 size lives in des1 bits [12:0]; the mask keeps buffer-2 bits. */
98 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
99 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
101 u32 des2; /* buffer 1 physical address */
103 u32 des3; /* buffer 2 physical address */
105 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * CMD19/CMD21 tuning block pattern for 4-bit bus width, as defined by
 * the SD physical layer specification.
 */
107 static const u8 tuning_blk_pattern_4bit[] = {
108 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
109 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
110 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
111 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
112 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
113 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
114 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
115 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/*
 * Tuning block pattern for 8-bit bus width (eMMC HS200 tuning, per the
 * JEDEC eMMC specification).
 */
118 static const u8 tuning_blk_pattern_8bit[] = {
119 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
120 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
121 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
122 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
123 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
124 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
125 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
126 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
127 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
128 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
129 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
130 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
131 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
132 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
133 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
134 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Forward declarations for reset and low-power helpers defined later in this file. */
137 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
138 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
139 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
140 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
142 /*printk the all register of current host*/
/*
 * Walk the dw_mci_regs name/offset table and dump every controller
 * register of this host to the kernel log.  Debug aid only.
 */
144 static int dw_mci_regs_printk(struct dw_mci *host)
146 struct sdmmc_reg *regs = dw_mci_regs;
/* Table is terminated by an entry with a NULL name. */
148 while( regs->name != 0 ){
149 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
152 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
157 #if defined(CONFIG_DEBUG_FS)
158 static int dw_mci_req_show(struct seq_file *s, void *v)
160 struct dw_mci_slot *slot = s->private;
161 struct mmc_request *mrq;
162 struct mmc_command *cmd;
163 struct mmc_command *stop;
164 struct mmc_data *data;
166 /* Make sure we get a consistent snapshot */
167 spin_lock_bh(&slot->host->lock);
177 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
178 cmd->opcode, cmd->arg, cmd->flags,
179 cmd->resp[0], cmd->resp[1], cmd->resp[2],
180 cmd->resp[2], cmd->error);
182 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
183 data->bytes_xfered, data->blocks,
184 data->blksz, data->flags, data->error);
187 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
188 stop->opcode, stop->arg, stop->flags,
189 stop->resp[0], stop->resp[1], stop->resp[2],
190 stop->resp[2], stop->error);
193 spin_unlock_bh(&slot->host->lock);
198 static int dw_mci_req_open(struct inode *inode, struct file *file)
200 return single_open(file, dw_mci_req_show, inode->i_private);
203 static const struct file_operations dw_mci_req_fops = {
204 .owner = THIS_MODULE,
205 .open = dw_mci_req_open,
208 .release = single_release,
211 static int dw_mci_regs_show(struct seq_file *s, void *v)
213 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
214 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
215 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
216 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
217 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
218 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
223 static int dw_mci_regs_open(struct inode *inode, struct file *file)
225 return single_open(file, dw_mci_regs_show, inode->i_private);
228 static const struct file_operations dw_mci_regs_fops = {
229 .owner = THIS_MODULE,
230 .open = dw_mci_regs_open,
233 .release = single_release,
/*
 * Create the per-slot debugfs hierarchy under the mmc host's debugfs
 * root: "regs", "req", plus raw views of the host state machine and
 * pending/completed event bitmasks.  Failures are only logged.
 */
236 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
238 struct mmc_host *mmc = slot->mmc;
239 struct dw_mci *host = slot->host;
243 root = mmc->debugfs_root;
247 node = debugfs_create_file("regs", S_IRUSR, root, host,
252 node = debugfs_create_file("req", S_IRUSR, root, slot,
257 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
/* pending/completed event masks exposed as hex for easier flag reading */
261 node = debugfs_create_x32("pending_events", S_IRUSR, root,
262 (u32 *)&host->pending_events);
266 node = debugfs_create_x32("completed_events", S_IRUSR, root,
267 (u32 *)&host->completed_events);
274 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
276 #endif /* defined(CONFIG_DEBUG_FS) */
278 static void dw_mci_set_timeout(struct dw_mci *host)
280 /* timeout (maximum) */
281 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMD register flag word:
 * stop/abort handling, response expectation/length/CRC, and data phase
 * direction bits.  Gives the platform drv_data a final hook to adjust
 * the flags.  Returns the assembled CMD flags.
 */
284 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
286 struct mmc_data *data;
287 struct dw_mci_slot *slot = mmc_priv(mmc);
288 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
/* Mark in-flight; cleared with the real status on completion. */
290 cmd->error = -EINPROGRESS;
294 if (cmdr == MMC_STOP_TRANSMISSION)
295 cmdr |= SDMMC_CMD_STOP;
297 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
299 if (cmd->flags & MMC_RSP_PRESENT) {
300 /* We expect a response, so set this bit */
301 cmdr |= SDMMC_CMD_RESP_EXP;
302 if (cmd->flags & MMC_RSP_136)
303 cmdr |= SDMMC_CMD_RESP_LONG;
306 if (cmd->flags & MMC_RSP_CRC)
307 cmdr |= SDMMC_CMD_RESP_CRC;
/* Data phase: direction and (legacy) stream-mode flags. */
311 cmdr |= SDMMC_CMD_DAT_EXP;
312 if (data->flags & MMC_DATA_STREAM)
313 cmdr |= SDMMC_CMD_STRM_MODE;
314 if (data->flags & MMC_DATA_WRITE)
315 cmdr |= SDMMC_CMD_DAT_WR;
318 if (drv_data && drv_data->prepare_command)
319 drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build the stop/abort command that terminates a data transfer:
 * CMD12 (STOP_TRANSMISSION) for block read/write, or CMD52 with the
 * CCCR abort bits for SDIO extended I/O.  Fills host->stop_abort and
 * returns the corresponding CMD register flags.
 */
325 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
327 struct mmc_command *stop;
333 stop = &host->stop_abort;
335 memset(stop, 0, sizeof(struct mmc_command));
337 if (cmdr == MMC_READ_SINGLE_BLOCK ||
338 cmdr == MMC_READ_MULTIPLE_BLOCK ||
339 cmdr == MMC_WRITE_BLOCK ||
340 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
341 stop->opcode = MMC_STOP_TRANSMISSION;
343 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
344 } else if (cmdr == SD_IO_RW_EXTENDED) {
/* SDIO abort: write the ASx bits of CCCR via CMD52, same function number. */
345 stop->opcode = SD_IO_RW_DIRECT;
346 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
347 ((cmd->arg >> 28) & 0x7);
348 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
353 cmdr = stop->opcode | SDMMC_CMD_STOP |
354 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Write argument and CMD flags to the controller and kick off the
 * command (SDMMC_CMD_START).  CMD11 (voltage switch) gets special
 * treatment: low-power clock gating is disabled first and the
 * VOLT_SWITCH flag is added so the controller sequences the switch.
 */
359 static void dw_mci_start_command(struct dw_mci *host,
360 struct mmc_command *cmd, u32 cmd_flags)
362 struct dw_mci_slot *slot = host->slot[0];
363 /*temporality fix slot[0] due to host->num_slots equal to 1*/
/* Remember the previous command for error reporting/debug. */
365 host->pre_cmd = host->cmd;
368 "start command: ARGR=0x%08x CMDR=0x%08x\n",
369 cmd->arg, cmd_flags);
371 if(SD_SWITCH_VOLTAGE == cmd->opcode){
372 /*confirm non-low-power mode*/
373 mci_writel(host, CMDARG, 0);
374 dw_mci_disable_low_power(slot);
376 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
377 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
379 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
382 mci_writel(host, CMDARG, cmd->arg);
385 /* fix the value to 1 in some Soc,for example RK3188. */
386 if(host->mmc->hold_reg_flag)
387 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
389 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
393 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
395 dw_mci_start_command(host, data->stop, host->stop_cmdr);
398 /* DMA interface functions */
399 static void dw_mci_stop_dma(struct dw_mci *host)
401 if (host->using_dma) {
402 /* Fixme: No need to terminate edma, may cause flush op */
403 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
404 host->dma_ops->stop(host);
405 host->dma_ops->cleanup(host);
408 /* Data transfer was stopped by the interrupt handler */
409 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
412 static int dw_mci_get_dma_dir(struct mmc_data *data)
414 if (data->flags & MMC_DATA_WRITE)
415 return DMA_TO_DEVICE;
417 return DMA_FROM_DEVICE;
420 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the scatterlist of the current data transfer, unless the upper
 * layer pre-mapped it (host_cookie set) and will unmap in post_req.
 */
421 static void dw_mci_dma_cleanup(struct dw_mci *host)
423 struct mmc_data *data = host->data;
426 if (!data->host_cookie)
427 dma_unmap_sg(host->dev,
430 dw_mci_get_dma_dir(data));
433 static void dw_mci_idmac_reset(struct dw_mci *host)
435 u32 bmod = mci_readl(host, BMOD);
436 /* Software reset of DMA */
437 bmod |= SDMMC_IDMAC_SWRESET;
438 mci_writel(host, BMOD, bmod);
441 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
445 /* Disable and reset the IDMAC interface */
446 temp = mci_readl(host, CTRL);
447 temp &= ~SDMMC_CTRL_USE_IDMAC;
448 temp |= SDMMC_CTRL_DMA_RESET;
449 mci_writel(host, CTRL, temp);
451 /* Stop the IDMAC running */
452 temp = mci_readl(host, BMOD);
453 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
454 temp |= SDMMC_IDMAC_SWRESET;
455 mci_writel(host, BMOD, temp);
/*
 * Internal-DMAC completion callback: unmap buffers, flag the transfer
 * complete and schedule the state-machine tasklet.
 */
458 static void dw_mci_idmac_complete_dma(void *arg)
460 struct dw_mci *host = arg;
461 struct mmc_data *data = host->data;
463 dev_vdbg(host->dev, "DMA complete\n");
466 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
467 host->mrq->cmd->opcode,host->mrq->cmd->arg,
468 data->blocks,data->blksz,mmc_hostname(host->mmc));
471 host->dma_ops->cleanup(host);
474 * If the card was removed, data will be NULL. No point in trying to
475 * send the stop command or waiting for NBUSY in this case.
478 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
479 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the request's scatterlist: one
 * descriptor per sg entry, OWN+chained, with first/last descriptors
 * specially marked (FD / LD, interrupts enabled only on the last).
 */
483 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
487 struct idmac_desc *desc = host->sg_cpu;
489 for (i = 0; i < sg_len; i++, desc++) {
490 unsigned int length = sg_dma_len(&data->sg[i]);
491 u32 mem_addr = sg_dma_address(&data->sg[i]);
493 /* Set the OWN bit and disable interrupts for this descriptor */
494 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
497 IDMAC_SET_BUFFER1_SIZE(desc, length);
499 /* Physical address to DMA to/from */
500 desc->des2 = mem_addr;
503 /* Set first descriptor */
505 desc->des0 |= IDMAC_DES0_FD;
507 /* Set last descriptor */
/* Re-derive the last descriptor's address from the ring base. */
508 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
509 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
510 desc->des0 |= IDMAC_DES0_LD;
/*
 * Program the descriptor ring for this transfer and start the internal
 * DMAC: select IDMAC in CTRL, enable it in BMOD, then poke PLDMND so
 * the DMAC re-reads the descriptor list.
 */
515 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
519 dw_mci_translate_sglist(host, host->data, sg_len);
521 /* Select IDMAC interface */
522 temp = mci_readl(host, CTRL);
523 temp |= SDMMC_CTRL_USE_IDMAC;
524 mci_writel(host, CTRL, temp);
528 /* Enable the IDMAC */
529 temp = mci_readl(host, BMOD);
530 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
531 mci_writel(host, BMOD, temp);
533 /* Start it running */
534 mci_writel(host, PLDMND, 1);
/*
 * One-time setup of the internal DMAC: build a forward-linked,
 * circular descriptor ring in host->sg_cpu, reset the DMAC, unmask
 * only the TX/RX-complete interrupts and program the ring base.
 */
537 static int dw_mci_idmac_init(struct dw_mci *host)
539 struct idmac_desc *p;
542 /* Number of descriptors in the ring buffer */
543 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
545 /* Forward link the descriptor list */
546 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
547 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
549 /* Set the last descriptor as the end-of-ring descriptor */
550 p->des3 = host->sg_dma;
551 p->des0 = IDMAC_DES0_ER;
553 dw_mci_idmac_reset(host);
555 /* Mask out interrupts - get Tx & Rx complete only */
556 mci_writel(host, IDSTS, IDMAC_INT_CLR);
557 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
560 /* Set the descriptor base address */
561 mci_writel(host, DBADDR, host->sg_dma);
565 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
566 .init = dw_mci_idmac_init,
567 .start = dw_mci_idmac_start_dma,
568 .stop = dw_mci_idmac_stop_dma,
569 .complete = dw_mci_idmac_complete_dma,
570 .cleanup = dw_mci_dma_cleanup,
/*
 * Unmap the scatterlist for the external-DMA path, unless the upper
 * layer pre-mapped it (host_cookie set).
 */
574 static void dw_mci_edma_cleanup(struct dw_mci *host)
576 struct mmc_data *data = host->data;
579 if (!data->host_cookie)
580 dma_unmap_sg(host->dev,
581 data->sg, data->sg_len,
582 dw_mci_get_dma_dir(data));
585 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
587 dmaengine_terminate_all(host->dms->ch);
/*
 * External-DMA completion callback: sync the CPU view of read buffers,
 * unmap, flag the transfer complete and kick the tasklet.
 */
590 static void dw_mci_edmac_complete_dma(void *arg)
592 struct dw_mci *host = arg;
593 struct mmc_data *data = host->data;
595 dev_vdbg(host->dev, "DMA complete\n");
598 if(data->flags & MMC_DATA_READ)
599 /* Invalidate cache after read */
600 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
601 data->sg_len, DMA_FROM_DEVICE);
603 host->dma_ops->cleanup(host);
606 * If the card was removed, data will be NULL. No point in trying to
607 * send the stop command or waiting for NBUSY in this case.
610 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
611 tasklet_schedule(&host->tasklet);
/*
 * Start a data transfer through an external dmaengine channel: derive
 * the slave config (FIFO address, 32-bit width, burst size matched to
 * the controller's FIFOTH MSIZE), prepare a slave sg descriptor in the
 * proper direction, install dw_mci_edmac_complete_dma() as callback and
 * issue it.  Writes also flush the CPU cache before the transfer.
 */
615 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
617 struct dma_slave_config slave_config;
618 struct dma_async_tx_descriptor *desc = NULL;
619 struct scatterlist *sgl = host->data->sg;
620 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
621 u32 sg_elems = host->data->sg_len;
622 u32 fifoth_val, mburst;
626 /* Set external dma config: burst size, burst width*/
627 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
628 slave_config.src_addr = slave_config.dst_addr;
629 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
630 slave_config.src_addr_width = slave_config.dst_addr_width;
632 /* Match FIFO dma burst MSIZE with external dma config*/
633 fifoth_val = mci_readl(host, FIFOTH);
634 mburst = mszs[(fifoth_val >> 28) & 0x7];
636 /* edmac limit burst to 16, but work around for rk3036 to 8 */
637 if (unlikely(cpu_is_rk3036()))
/* Clamp the burst and re-program FIFOTH so HW and dmaengine agree. */
642 if (mburst > burst_limit) {
643 mburst = burst_limit;
644 fifoth_val = SDMMC_SET_FIFOTH(mszs[3], mszs[3] - 1, (host->fifo_depth) / 2);
645 mci_writel(host, FIFOTH, fifoth_val);
648 slave_config.dst_maxburst = mburst;
649 slave_config.src_maxburst = slave_config.dst_maxburst;
651 if(host->data->flags & MMC_DATA_WRITE){
652 slave_config.direction = DMA_MEM_TO_DEV;
653 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
655 dev_err(host->dev, "error in dw_mci edma configuration.\n");
659 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
660 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
662 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
665 /* Set dw_mci_edmac_complete_dma as callback */
666 desc->callback = dw_mci_edmac_complete_dma;
667 desc->callback_param = (void *)host;
668 dmaengine_submit(desc);
670 /* Flush cache before write */
671 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
672 sg_elems, DMA_TO_DEVICE);
673 dma_async_issue_pending(host->dms->ch);
/* Read direction: device-to-memory, same prepare/submit sequence. */
676 slave_config.direction = DMA_DEV_TO_MEM;
677 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
679 dev_err(host->dev, "error in dw_mci edma configuration.\n");
682 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
683 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
685 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
688 /* set dw_mci_edmac_complete_dma as callback */
689 desc->callback = dw_mci_edmac_complete_dma;
690 desc->callback_param = (void *)host;
691 dmaengine_submit(desc);
692 dma_async_issue_pending(host->dms->ch);
696 static int dw_mci_edmac_init(struct dw_mci *host)
698 /* Request external dma channel, SHOULD decide chn in dts */
700 host->dms = (struct dw_mci_dma_slave *)kmalloc
701 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
702 if (NULL == host->dms) {
703 dev_err(host->dev, "No enough memory to alloc dms.\n");
707 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
708 if (!host->dms->ch) {
709 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
710 host->dms->ch->chan_id);
717 if (NULL != host->dms) {
/*
 * Release the external DMA channel and its bookkeeping struct.
 * Safe to call when init failed or never ran (NULL checks throughout).
 */
725 static void dw_mci_edmac_exit(struct dw_mci *host)
727 if (NULL != host->dms) {
728 if (NULL != host->dms->ch) {
729 dma_release_channel(host->dms->ch);
730 host->dms->ch = NULL;
737 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
738 .init = dw_mci_edmac_init,
739 .exit = dw_mci_edmac_exit,
740 .start = dw_mci_edmac_start_dma,
741 .stop = dw_mci_edmac_stop_dma,
742 .complete = dw_mci_edmac_complete_dma,
743 .cleanup = dw_mci_edma_cleanup,
745 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * DMA-map the request's scatterlist (or reuse a pre-request mapping via
 * host_cookie).  Declines DMA for short or unaligned transfers, in
 * which case the caller falls back to PIO.  Returns the mapped sg count
 * (or a negative value to force PIO).
 */
747 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
748 struct mmc_data *data,
751 struct scatterlist *sg;
752 unsigned int i, sg_len;
754 if (!next && data->host_cookie)
755 return data->host_cookie;
758 * We don't do DMA on "complex" transfers, i.e. with
759 * non-word-aligned buffers or lengths. Also, we don't bother
760 * with all the DMA setup overhead for short transfers.
762 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
/* Every sg entry must be 32-bit aligned in both offset and length. */
768 for_each_sg(data->sg, sg, data->sg_len, i) {
769 if (sg->offset & 3 || sg->length & 3)
773 sg_len = dma_map_sg(host->dev,
776 dw_mci_get_dma_dir(data));
/* Remember the mapping so a later submit can skip re-mapping. */
781 data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: map the next request's buffers ahead of time so
 * the actual submit path can start DMA without mapping latency.
 */
786 static void dw_mci_pre_req(struct mmc_host *mmc,
787 struct mmc_request *mrq,
790 struct dw_mci_slot *slot = mmc_priv(mmc);
791 struct mmc_data *data = mrq->data;
793 if (!slot->host->use_dma || !data)
/* A stale cookie would confuse pre_dma_transfer; reset it first. */
796 if (data->host_cookie) {
797 data->host_cookie = 0;
801 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
802 data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: undo a pre_req mapping once the request has
 * fully completed, and clear the cookie.
 */
805 static void dw_mci_post_req(struct mmc_host *mmc,
806 struct mmc_request *mrq,
809 struct dw_mci_slot *slot = mmc_priv(mmc);
810 struct mmc_data *data = mrq->data;
812 if (!slot->host->use_dma || !data)
815 if (data->host_cookie)
816 dma_unmap_sg(slot->host->dev,
819 dw_mci_get_dma_dir(data));
820 data->host_cookie = 0;
/*
 * Recompute the FIFOTH register (DMA MSIZE plus RX/TX watermarks) for a
 * new block size, picking the largest burst size that divides both the
 * block depth and the TX watermark complement.  Only meaningful on the
 * internal-DMAC build.
 */
823 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
825 #ifdef CONFIG_MMC_DW_IDMAC
826 unsigned int blksz = data->blksz;
827 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
828 u32 fifo_width = 1 << host->data_shift;
829 u32 blksz_depth = blksz / fifo_width, fifoth_val;
830 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
831 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
833 tx_wmark = (host->fifo_depth) / 2;
834 tx_wmark_invers = host->fifo_depth - tx_wmark;
838 * if blksz is not a multiple of the FIFO width
840 if (blksz % fifo_width) {
/* Search downward for the largest MSIZE that fits evenly. */
847 if (!((blksz_depth % mszs[idx]) ||
848 (tx_wmark_invers % mszs[idx]))) {
850 rx_wmark = mszs[idx] - 1;
855 * If idx is '0', it won't be tried
856 * Thus, initial values are uesed
859 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
860 mci_writel(host, FIFOTH, fifoth_val);
/*
 * Configure the card-read-threshold (CDTHRCTL): only useful at HS200 /
 * SDR104 timings, where the controller should start draining the FIFO
 * once a full block is available.  Disabled otherwise.
 */
865 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
867 unsigned int blksz = data->blksz;
868 u32 blksz_depth, fifo_depth;
871 WARN_ON(!(data->flags & MMC_DATA_READ));
873 if (host->timing != MMC_TIMING_MMC_HS200 &&
874 host->timing != MMC_TIMING_UHS_SDR104)
877 blksz_depth = blksz / (1 << host->data_shift);
878 fifo_depth = host->fifo_depth;
880 if (blksz_depth > fifo_depth)
884 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
885 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
886 * Currently just choose blksz.
889 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
/* Fall-through/disable path: threshold off for other timings. */
893 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to run this data phase over DMA.  Maps the sg list, retunes
 * FIFOTH when the block size changed, enables the controller's DMA
 * interface, masks the PIO (RX/TX drain) interrupts and starts the DMA
 * engine.  Returns non-zero to make the caller fall back to PIO.
 */
896 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
904 /* If we don't have a channel, we can't do DMA */
908 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
910 /* Fixme: No need terminate edma, may cause flush op */
911 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
912 host->dma_ops->stop(host);
919 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
920 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
924 * Decide the MSIZE and RX/TX Watermark.
925 * If current block size is same with previous size,
926 * no need to update fifoth.
928 if (host->prev_blksz != data->blksz)
929 dw_mci_adjust_fifoth(host, data)
932 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
934 /* Enable the DMA interface */
935 temp = mci_readl(host, CTRL);
936 temp |= SDMMC_CTRL_DMA_ENABLE;
937 mci_writel(host, CTRL, temp);
939 /* Disable RX/TX IRQs, let DMA handle it */
940 spin_lock_irqsave(&host->slock, flags);
941 temp = mci_readl(host, INTMASK);
942 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
943 mci_writel(host, INTMASK, temp);
944 spin_unlock_irqrestore(&host->slock, flags);
946 host->dma_ops->start(host, sg_len);
/*
 * Set up the data phase of a request: record the transfer direction,
 * program the read threshold for reads, and attempt DMA.  On DMA
 * refusal, fall back to PIO: start the sg_miter, re-enable the RX/TX
 * drain interrupts, disable the DMA interface and restore the initial
 * FIFOTH value.
 */
951 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
956 data->error = -EINPROGRESS;
958 //WARN_ON(host->data);
963 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
965 if (data->flags & MMC_DATA_READ) {
966 host->dir_status = DW_MCI_RECV_STATUS;
967 dw_mci_ctrl_rd_thld(host, data);
969 host->dir_status = DW_MCI_SEND_STATUS;
972 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
973 data->blocks, data->blksz, mmc_hostname(host->mmc));
/* DMA declined: set up PIO via the sg mapping iterator. */
975 if (dw_mci_submit_data_dma(host, data)) {
976 int flags = SG_MITER_ATOMIC;
977 if (host->data->flags & MMC_DATA_READ)
978 flags |= SG_MITER_TO_SG;
980 flags |= SG_MITER_FROM_SG;
982 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
984 host->part_buf_start = 0;
985 host->part_buf_count = 0;
987 spin_lock_irqsave(&host->slock, flag);
988 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
989 temp = mci_readl(host, INTMASK);
990 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
991 mci_writel(host, INTMASK, temp);
992 spin_unlock_irqrestore(&host->slock, flag);
994 temp = mci_readl(host, CTRL);
995 temp &= ~SDMMC_CTRL_DMA_ENABLE;
996 mci_writel(host, CTRL, temp);
999 * Use the initial fifoth_val for PIO mode.
1000 * If next issued data may be transfered by DMA mode,
1001 * prev_blksz should be invalidated.
1003 mci_writel(host, FIFOTH, host->fifoth_val);
1004 host->prev_blksz = 0;
1007 * Keep the current block size.
1008 * It will be used to decide whether to update
1009 * fifoth register next time.
1011 host->prev_blksz = data->blksz;
/*
 * Issue a "raw" controller command (e.g. clock update) directly via the
 * CMD register: first wait (bounded) for the card/controller to go
 * un-busy, then start the command and poll until the START bit clears.
 * Logs a timeout rather than returning an error.
 */
1015 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1017 struct dw_mci *host = slot->host;
1018 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1019 unsigned int cmd_status = 0;
1020 #ifdef SDMMC_WAIT_FOR_UNBUSY
1022 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
/* Busy-wait only makes sense while a card is actually present. */
1024 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1026 ret = time_before(jiffies, timeout);
1027 cmd_status = mci_readl(host, STATUS);
1028 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1032 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1033 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1036 mci_writel(host, CMDARG, arg);
1038 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
/* Clock-update commands complete fast; use a shorter poll budget. */
1039 if(cmd & SDMMC_CMD_UPD_CLK)
1040 timeout = jiffies + msecs_to_jiffies(50);
1042 timeout = jiffies + msecs_to_jiffies(500);
1043 while (time_before(jiffies, timeout)) {
1044 cmd_status = mci_readl(host, CMD);
1045 if (!(cmd_status & SDMMC_CMD_START))
1048 dev_err(&slot->mmc->class_dev,
1049 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1050 cmd, arg, cmd_status);
/*
 * Apply the slot's requested clock and bus width to the controller.
 *
 * Either gates the card clock (clock == 0) or computes the CLKDIV
 * divider from bus_hz, with several Rockchip-specific workarounds:
 * re-parenting clk_mmc for very low (init) clocks and forcing div to a
 * sane value for HS-DDR eMMC (see the in-line FIXME).  Each divider
 * change follows the DesignWare sequence: disable CLKENA, send
 * UPD_CLK, program CLKDIV, send UPD_CLK, re-enable CLKENA (with
 * low-power gating unless SDIO interrupts are in use).
 */
1053 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1055 struct dw_mci *host = slot->host;
1056 unsigned int tempck,clock = slot->clock;
1061 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1062 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
/* clock == 0: gate the card clock and latch the change via UPD_CLK. */
1065 mci_writel(host, CLKENA, 0);
1066 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1067 if(host->svi_flags == 0)
1068 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1070 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1072 } else if (clock != host->current_speed || force_clkinit) {
1073 div = host->bus_hz / clock;
1074 if (host->bus_hz % clock && host->bus_hz > clock)
1076 * move the + 1 after the divide to prevent
1077 * over-clocking the card.
1081 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1083 if ((clock << div) != slot->__clk_old || force_clkinit) {
1084 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1085 dev_info(&slot->mmc->class_dev,
1086 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1087 slot->id, host->bus_hz, clock,
1090 host->set_speed = tempck;
1091 host->set_div = div;
1095 mci_writel(host, CLKENA, 0);
1096 mci_writel(host, CLKSRC, 0);
1100 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
/* Init-speed workaround: run clk_mmc at 2x so the card sees <=400 kHz. */
1102 if(clock <= 400*1000){
1103 MMC_DBG_BOOT_FUNC(host->mmc,
1104 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1105 clock * 2, mmc_hostname(host->mmc));
1106 /* clk_mmc will change parents to 24MHz xtal*/
1107 clk_set_rate(host->clk_mmc, clock * 2);
1110 host->set_div = div;
1114 MMC_DBG_BOOT_FUNC(host->mmc,
1115 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1116 mmc_hostname(host->mmc));
1119 MMC_DBG_ERR_FUNC(host->mmc,
1120 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1121 mmc_hostname(host->mmc));
1123 host->set_div = div;
1124 host->bus_hz = host->set_speed * 2;
1125 MMC_DBG_BOOT_FUNC(host->mmc,
1126 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1127 div, host->bus_hz, mmc_hostname(host->mmc));
1129 /* BUG may be here, come on, Linux BSP engineer looks!
1130 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1131 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1132 some oops happened like that:
1133 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1134 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1135 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1136 mmc0: new high speed DDR MMC card at address 0001
1137 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1139 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1140 mmcblk0: retrying using single block read
1141 mmcblk0: error -110 sending status command, retrying
1143 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1146 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1147 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1150 host->set_div = div;
1151 host->bus_hz = host->set_speed * 2;
1152 MMC_DBG_BOOT_FUNC(host->mmc,
1153 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1154 div, host->bus_hz, mmc_hostname(host->mmc));
/* Older controller revisions take the raw rate; newer ones run at 2x. */
1157 if (host->verid < DW_MMC_240A)
1158 clk_set_rate(host->clk_mmc,(host->bus_hz));
1160 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1166 /* set clock to desired speed */
1167 mci_writel(host, CLKDIV, div);
1171 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1173 /* enable clock; only low power if no SDIO */
1174 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1176 if (host->verid < DW_MMC_240A)
1177 sdio_int = SDMMC_INT_SDIO(slot->id);
1179 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1181 if (!(mci_readl(host, INTMASK) & sdio_int))
1182 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1183 mci_writel(host, CLKENA, clk_en_a);
1187 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1188 /* keep the clock with reflecting clock dividor */
1189 slot->__clk_old = clock << div;
1192 host->current_speed = clock;
1194 if(slot->ctype != slot->pre_ctype)
1195 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1197 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1198 mmc_hostname(host->mmc));
1199 slot->pre_ctype = slot->ctype;
1201 /* Set the current slot bus width */
1202 mci_writel(host, CTYPE, (slot->ctype << slot->id));
/* Defined elsewhere; used to read the eMMC ext_csd erase timing fields. */
1205 extern struct mmc_card *this_card;
/*
 * Poll the STATUS register until the card and controller report
 * not-busy, with a timeout scaled to the card type — and, for eMMC
 * erase commands, scaled up further using the card's ext_csd erase
 * timing parameters (secure erase gets the sec_erase_mult factor).
 */
1206 static void dw_mci_wait_unbusy(struct dw_mci *host)
1209 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1210 unsigned long time_loop;
1211 unsigned int status;
1214 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1216 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1217 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1218 /* Special care for (secure)erase timeout calculation */
/* NOTE(review): '== 1' on a bit-31 mask looks suspicious — likely meant '!= 0'; confirm. */
1220 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1223 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1224 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1225 300000 * (this_card->ext_csd.sec_erase_mult)) :
1226 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1230 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1231 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1232 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1233 timeout = SDMMC_DATA_TIMEOUT_SD;
1236 time_loop = jiffies + msecs_to_jiffies(timeout);
1238 status = mci_readl(host, STATUS);
1239 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1241 } while (time_before(jiffies, time_loop));
1246 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1249 * 0--status is busy.
1250 * 1--status is unbusy.
/*
 * mmc_host_ops.card_busy used during the signal-voltage switch (CMD11)
 * handshake.  Toggles host->svi_flags between the two phases of the
 * switch and returns the new value (0 = busy, 1 = unbusy).
 */
1252 int dw_mci_card_busy(struct mmc_host *mmc)
1254 struct dw_mci_slot *slot = mmc_priv(mmc);
1255 struct dw_mci *host = slot->host;
1257 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1258 host->svi_flags, mmc_hostname(host->mmc));
1261 if(host->svi_flags == 0){
1263 host->svi_flags = 1;
1264 return host->svi_flags;
1267 host->svi_flags = 0;
1268 return host->svi_flags;
/*
 * Program the controller for one command of a request: select the slot,
 * wait for the controller to go idle, reset per-request event state,
 * program byte/block counts for data transfers, submit DMA/PIO data,
 * then fire the command.  Also precomputes the stop-command register
 * value when the request carries a stop command.
 */
1274 static void __dw_mci_start_request(struct dw_mci *host,
1275 				   struct dw_mci_slot *slot,
1276 				   struct mmc_command *cmd)
1278 	struct mmc_request *mrq;
1279 	struct mmc_data *data;
1283 	if (host->pdata->select_slot)
1284 		host->pdata->select_slot(slot->id);
1286 	host->cur_slot = slot;
/* Controller must be idle before a new command is issued. */
1289 	dw_mci_wait_unbusy(host);
1291 	host->pending_events = 0;
1292 	host->completed_events = 0;
1293 	host->data_status = 0;
1297 		dw_mci_set_timeout(host);
1298 		mci_writel(host, BYTCNT, data->blksz*data->blocks);
1299 		mci_writel(host, BLKSIZ, data->blksz);
1302 	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1304 	/* this is the first command, send the initialization clock */
1305 	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1306 		cmdflags |= SDMMC_CMD_INIT;
1309 		dw_mci_submit_data(host, data);
1313 	dw_mci_start_command(host, cmd, cmdflags);
/* Cache the stop command encoding so the ISR can issue it quickly. */
1316 		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * Kick off the slot's queued request.  If the request has a "set block
 * count" (CMD23/sbc) command it is sent first; otherwise the main
 * command starts immediately.
 */
1319 static void dw_mci_start_request(struct dw_mci *host,
1320 				 struct dw_mci_slot *slot)
1322 	struct mmc_request *mrq = slot->mrq;
1323 	struct mmc_command *cmd;
1325 	MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1326 		mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
/* sbc (CMD23) takes precedence over the data command when present. */
1328 	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1329 	__dw_mci_start_request(host, slot, cmd);
1332 /* must be called with host->lock held */
/*
 * Queue a request: start it immediately when the host state machine is
 * idle, otherwise append the slot to the host's pending queue so it is
 * picked up by dw_mci_request_end().
 */
1333 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1334 				 struct mmc_request *mrq)
1336 	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1341 	if (host->state == STATE_IDLE) {
1342 		host->state = STATE_SENDING_CMD;
1343 		dw_mci_start_request(host, slot);
1345 		list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops .request entry point.  Fails the request with -ENOMEDIUM
 * when no card is present; otherwise hands it to the queue under
 * host->lock so presence check and queueing stay atomic.
 */
1349 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1351 	struct dw_mci_slot *slot = mmc_priv(mmc);
1352 	struct dw_mci *host = slot->host;
1357 	 * The check for card presence and queueing of the request must be
1358 	 * atomic, otherwise the card could be removed in between and the
1359 	 * request wouldn't fail until another card was inserted.
1361 	spin_lock_bh(&host->lock);
1363 	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1364 		spin_unlock_bh(&host->lock);
1365 		mrq->cmd->error = -ENOMEDIUM;
1366 		MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1367 			__LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
/* Complete immediately: the card is gone, nothing to send. */
1369 		mmc_request_done(mmc, mrq);
1373 	MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1374 		mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1376 	dw_mci_queue_request(host, slot, mrq);
1378 	spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops .set_ios: apply bus width, timing (DDR bit in UHS_REG),
 * clock and power-mode changes requested by the MMC core.  Optionally
 * busy-waits for the controller to go idle first (SDMMC_WAIT_FOR_UNBUSY),
 * with a longer budget while a signal-voltage switch is in flight.
 */
1381 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1383 	struct dw_mci_slot *slot = mmc_priv(mmc);
1384 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1385 	struct dw_mci *host = slot->host;
1387 	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1390 #ifdef SDMMC_WAIT_FOR_UNBUSY
1391 	unsigned long time_loop;
1394 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* During voltage switch allow the full SD data timeout for unbusy. */
1395 	if(host->svi_flags == 1)
1396 		time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1398 		time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1400 	time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1403 	if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1404 		printk("%d..%s: no card. [%s]\n", \
1405 			__LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until neither the data path nor the MC is busy. */
1410 		ret = time_before(jiffies, time_loop);
1411 		regs = mci_readl(slot->host, STATUS);
1412 		if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1418 		printk("slot->flags = %lu ", slot->flags);
1419 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1420 		if(host->svi_flags != 1)
1423 			printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1424 				__LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Translate core bus width into the controller CTYPE encoding. */
1428 	switch (ios->bus_width) {
1429 	case MMC_BUS_WIDTH_4:
1430 		slot->ctype = SDMMC_CTYPE_4BIT;
1432 	case MMC_BUS_WIDTH_8:
1433 		slot->ctype = SDMMC_CTYPE_8BIT;
1436 		/* set default 1 bit mode */
1437 		slot->ctype = SDMMC_CTYPE_1BIT;
1438 		slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode is a per-slot bit in the upper half of UHS_REG. */
1441 	regs = mci_readl(slot->host, UHS_REG);
1444 	if (ios->timing == MMC_TIMING_UHS_DDR50)
1445 		regs |= ((0x1 << slot->id) << 16);
1447 		regs &= ~((0x1 << slot->id) << 16);
1449 	mci_writel(slot->host, UHS_REG, regs);
1450 	slot->host->timing = ios->timing;
1453 	 * Use mirror of ios->clock to prevent race with mmc
1454 	 * core ios update when finding the minimum.
1456 	slot->clock = ios->clock;
/* Give the SoC-specific glue a chance to tweak timing first. */
1458 	if (drv_data && drv_data->set_ios)
1459 		drv_data->set_ios(slot->host, ios);
1461 	/* Slot specific timing and width adjustment */
1462 	dw_mci_setup_bus(slot, false);
1466 	switch (ios->power_mode) {
1468 		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1470 		if (slot->host->pdata->setpower)
1471 			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1472 		regs = mci_readl(slot->host, PWREN);
1473 		regs |= (1 << slot->id);
1474 		mci_writel(slot->host, PWREN, regs);
1477 		/* Power down slot */
1478 		if(slot->host->pdata->setpower)
1479 			slot->host->pdata->setpower(slot->id, 0);
1480 		regs = mci_readl(slot->host, PWREN);
1481 		regs &= ~(1 << slot->id);
1482 		mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops .get_ro: report write-protect state.  Resolution order:
 * slot quirk (never protected) -> platform callback -> WP GPIO ->
 * controller WRTPRT register.
 */
1489 static int dw_mci_get_ro(struct mmc_host *mmc)
1492 	struct dw_mci_slot *slot = mmc_priv(mmc);
1493 	struct dw_mci_board *brd = slot->host->pdata;
1495 	/* Use platform get_ro function, else try on board write protect */
1496 	if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1498 	else if(brd->get_ro)
1499 		read_only = brd->get_ro(slot->id);
1500 	else if(gpio_is_valid(slot->wp_gpio))
1501 		read_only = gpio_get_value(slot->wp_gpio);
/* Fallback: per-slot bit in the controller's WRTPRT register. */
1504 			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1506 	dev_dbg(&mmc->class_dev, "card is %s\n",
1507 		read_only ? "read-only" : "read-write");
/*
 * mmc_host_ops .set_sdio_status: software card-detect for SDIO (e.g.
 * WiFi power on/off).  Updates the CARD_PRESENT flag, gates the mmc
 * clocks accordingly (enable on insert, disable on removal), and
 * triggers a delayed rescan via mmc_detect_change().
 */
1512 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1514 	struct dw_mci_slot *slot = mmc_priv(mmc);
1515 	struct dw_mci *host = slot->host;
1516 	/*struct dw_mci_board *brd = slot->host->pdata;*/
/* Only meaningful for hosts restricted to SDIO cards. */
1518 	if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1521 	spin_lock_bh(&host->lock);
1524 		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1526 		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1528 	spin_unlock_bh(&host->lock);
1530 	if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
/* Card "inserted": make sure hp/h/core clocks are running. */
1531 		if (!IS_ERR(host->hpclk_mmc) &&
1532 			__clk_is_enabled(host->hpclk_mmc) == false)
1533 			clk_prepare_enable(host->hpclk_mmc);
1534 		if (__clk_is_enabled(host->hclk_mmc) == false)
1535 			clk_prepare_enable(host->hclk_mmc);
1536 		if (__clk_is_enabled(host->clk_mmc) == false)
1537 			clk_prepare_enable(host->clk_mmc);
/* Card "removed": release clocks in reverse order. */
1539 		if (__clk_is_enabled(host->clk_mmc) == true)
1540 			clk_disable_unprepare(slot->host->clk_mmc);
1541 		if (__clk_is_enabled(host->hclk_mmc) == true)
1542 			clk_disable_unprepare(slot->host->hclk_mmc);
1543 		if (!IS_ERR(host->hpclk_mmc) &&
1544 			__clk_is_enabled(host->hpclk_mmc) == true)
1545 			clk_disable_unprepare(slot->host->hpclk_mmc);
/* 20 ms debounce before the core rescans the bus. */
1548 	mmc_detect_change(slot->mmc, 20);
/*
 * mmc_host_ops .get_cd: report card presence.  On RK3036/RK3126 SD
 * slots the CD GPIO doubles as a force-JTAG mux control in GRF, so
 * presence is derived from the GPIO with debounce, the CD IRQ polarity
 * is flipped for the next edge, and the JTAG mux is toggled to match.
 * Other paths: SDIO uses the software CARD_PRESENT flag; otherwise
 * platform quirk/callback, CD GPIO, or the CDETECT register.
 */
1554 static int dw_mci_get_cd(struct mmc_host *mmc)
1557 	struct dw_mci_slot *slot = mmc_priv(mmc);
1558 	struct dw_mci_board *brd = slot->host->pdata;
1559 	struct dw_mci *host = slot->host;
1560 	int gpio_cd = mmc_gpio_get_cd(mmc);
1561 	int force_jtag_bit, force_jtag_reg;
1565 	if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
1566 		(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1567 		gpio_cd = slot->cd_gpio;
1568 		irq = gpio_to_irq(gpio_cd);
1569 		if (gpio_is_valid(gpio_cd)) {
1570 			gpio_val = gpio_get_value(gpio_cd);
1571 			if (soc_is_rk3036()) {
1572 				force_jtag_bit = 11;
1573 				force_jtag_reg = RK312X_GRF_SOC_CON0;
1574 			} else if (soc_is_rk3126() || soc_is_rk3126b()) {
/*
 * NOTE(review): on this rk3126 branch only force_jtag_reg is visibly
 * assigned; verify force_jtag_bit is set before the grf_writel() below
 * uses it — otherwise it is an uninitialized-variable bug.
 */
1575 				force_jtag_reg = RK312X_GRF_SOC_CON0;
/* Debounce: the level must be stable across the re-read. */
1579 			if (gpio_val == gpio_get_value(gpio_cd)) {
1580 				gpio_cd = gpio_get_value(gpio_cd) == 0 ? 1 : 0;
1582 					irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1583 					/* Enable force_jtag wihtout card in slot, ONLY for NCD-package */
1584 					grf_writel((0x1 << (force_jtag_bit + 16)) | (1 << force_jtag_bit),
1587 					dw_mci_ctrl_all_reset(host);
1589 					irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
1590 					/* Really card detected: SHOULD disable force_jtag */
1591 					grf_writel((0x1 << (force_jtag_bit + 16)) | (0 << force_jtag_bit),
/* Level changed during debounce: keep last state, re-arm the IRQ. */
1596 				gpio_val = gpio_get_value(gpio_cd);
1598 					irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
1599 					irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1600 				return slot->last_detect_state;
1603 			dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
1607 	if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1608 		return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1610 	/* Use platform get_cd function, else try onboard card detect */
1611 	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1613 	else if (brd->get_cd)
1614 		present = !brd->get_cd(slot->id);
1615 	else if (!IS_ERR_VALUE(gpio_cd))
/* CDETECT is active-low per slot. */
1618 		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1621 	spin_lock_bh(&host->lock);
1623 		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1624 		dev_dbg(&mmc->class_dev, "card is present\n");
1626 		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1627 		dev_dbg(&mmc->class_dev, "card is not present\n");
1629 	spin_unlock_bh(&host->lock);
1636  * Dts Should caps emmc controller with poll-hw-reset
/*
 * mmc_host_ops .hw_reset for eMMC: (1) issue CMD12 to abort any
 * in-flight transfer, (2) wait for DATA_OVER, (3) reset IDMA, DMA and
 * FIFO in that exact order, then pulse RST_n/PWREN per the eMMC spec
 * timing constraints quoted below.
 */
1638 static void dw_mci_hw_reset(struct mmc_host *mmc)
1640 	struct dw_mci_slot *slot = mmc_priv(mmc);
1641 	struct dw_mci *host = slot->host;
1646 	unsigned long timeout;
1649 	/* (1) CMD12 to end any transfer in process */
1650 	cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1651 		| SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1653 	if(host->mmc->hold_reg_flag)
1654 		cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1655 	mci_writel(host, CMDARG, 0);
1657 	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll until the controller clears CMD_START (command accepted). */
1659 	timeout = jiffies + msecs_to_jiffies(500);
1661 		ret = time_before(jiffies, timeout);
1662 		if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1667 		MMC_DBG_ERR_FUNC(host->mmc,
1668 			"%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1669 			__func__, mmc_hostname(host->mmc));
1671 	/* (2) wait DTO, even if no response is sent back by card */
1673 	timeout = jiffies + msecs_to_jiffies(5);
1675 		ret = time_before(jiffies, timeout);
1676 		if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1677 			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1683 	/* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1685 	/* Software reset - BMOD[0] for IDMA only */
1686 	regs = mci_readl(host, BMOD);
1687 	regs |= SDMMC_IDMAC_SWRESET;
1688 	mci_writel(host, BMOD, regs);
1689 	udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1690 	regs = mci_readl(host, BMOD);
1691 	if(regs & SDMMC_IDMAC_SWRESET)
1692 		MMC_DBG_WARN_FUNC(host->mmc,
1693 			"%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1694 			__func__, mmc_hostname(host->mmc));
1696 	/* DMA reset - CTRL[2] */
1697 	regs = mci_readl(host, CTRL);
1698 	regs |= SDMMC_CTRL_DMA_RESET;
1699 	mci_writel(host, CTRL, regs);
1700 	udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1701 	regs = mci_readl(host, CTRL);
1702 	if(regs & SDMMC_CTRL_DMA_RESET)
1703 		MMC_DBG_WARN_FUNC(host->mmc,
1704 			"%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1705 			__func__, mmc_hostname(host->mmc));
1707 	/* FIFO reset - CTRL[1] */
1708 	regs = mci_readl(host, CTRL);
1709 	regs |= SDMMC_CTRL_FIFO_RESET;
1710 	mci_writel(host, CTRL, regs);
1711 	mdelay(1); /* no timing limited, 1ms is random value */
1712 	regs = mci_readl(host, CTRL);
1713 	if(regs & SDMMC_CTRL_FIFO_RESET)
/*
 * NOTE(review): copy-paste defect — this warning reports
 * SDMMC_CTRL_DMA_RESET but the check above is for the FIFO reset
 * (SDMMC_CTRL_FIFO_RESET).  The message text should be corrected.
 */
1714 		MMC_DBG_WARN_FUNC(host->mmc,
1715 			"%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1716 			__func__, mmc_hostname(host->mmc));
1719 	According to eMMC spec
1720 	tRstW >= 1us ;   RST_n pulse width
1721 	tRSCA >= 200us ; RST_n to Command time
1722 	tRSTH >= 1us ;   RST_n high period
/* Drive PWREN/RST_n low, wait, then release — per the timings above. */
1724 	mci_writel(slot->host, PWREN, 0x0);
1725 	mci_writel(slot->host, RST_N, 0x0);
1727 	udelay(10); /* 10us for bad quality eMMc. */
1729 	mci_writel(slot->host, PWREN, 0x1);
1730 	mci_writel(slot->host, RST_N, 0x1);
1732 	usleep_range(500, 1000); /* at least 500(> 200us) */
1736  * Disable lower power mode.
1738  * Low power mode will stop the card clock when idle. According to the
1739  * description of the CLKENA register we should disable low power mode
1740  * for SDIO cards if we need SDIO interrupts to work.
1742  * This function is fast if low power mode is already disabled.
1744 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1746 	struct dw_mci *host = slot->host;
1748 	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1750 	clk_en_a = mci_readl(host, CLKENA);
/* Only touch the hardware when the low-power bit is actually set. */
1752 	if (clk_en_a & clken_low_pwr) {
1753 		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
/* CLKENA changes require an update-clock command to take effect. */
1754 		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1755 			     SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * mmc_host_ops .enable_sdio_irq: mask/unmask the per-slot SDIO
 * interrupt bit in INTMASK.  Enabling also forces low-power clock
 * gating off, since a gated card clock would suppress SDIO IRQs.
 * The SDIO interrupt bit position moved by 8 in IP version 2.40a.
 */
1759 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1761 	struct dw_mci_slot *slot = mmc_priv(mmc);
1762 	struct dw_mci *host = slot->host;
1763 	unsigned long flags;
1767 	spin_lock_irqsave(&host->slock, flags);
1769 	/* Enable/disable Slot Specific SDIO interrupt */
1770 	int_mask = mci_readl(host, INTMASK);
1772 	if (host->verid < DW_MMC_240A)
1773 		sdio_int = SDMMC_INT_SDIO(slot->id);
1775 		sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1779 		 * Turn off low power mode if it was enabled.  This is a bit of
1780 		 * a heavy operation and we disable / enable IRQs a lot, so
1781 		 * we'll leave low power mode disabled and it will get
1782 		 * re-enabled again in dw_mci_setup_bus().
1784 		dw_mci_disable_low_power(slot);
1786 		mci_writel(host, INTMASK,
1787 			   (int_mask | sdio_int));
1789 		mci_writel(host, INTMASK,
1790 			   (int_mask & ~sdio_int));
1793 	spin_unlock_irqrestore(&host->slock, flags);
1796 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* Supported IO-domain voltages, in millivolts. */
1798 	IO_DOMAIN_12 = 1200,
1799 	IO_DOMAIN_18 = 1800,
1800 	IO_DOMAIN_33 = 3300,
/*
 * Program the SoC GRF so the SD IO domain matches the requested signal
 * voltage (one of the IO_DOMAIN_* values).  Each supported SoC uses a
 * different GRF register/bit layout (write-enable mask in the upper
 * half-word); unknown chips only log an error.
 */
1802 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1812 		MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1813 					__FUNCTION__, mmc_hostname(host->mmc));
1816 		MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1817 					__FUNCTION__, mmc_hostname(host->mmc));
1821 	if (cpu_is_rk3288()) {
1822 		if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
/* Bit 23 of the upper half-word is the write-enable for the vsel bit. */
1823 			grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1826 	} else if (host->cid == DW_MCI_TYPE_RK3368) {
1827 		if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1828 			regmap_write(host->grf, 0x900, (voltage << 6) | (1 << 22));
1832 		MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1833 					__FUNCTION__, mmc_hostname(host->mmc));
/*
 * Perform the actual signal-voltage switch (3.3V / 1.8V / 1.2V):
 * set the vmmc regulator, switch the GRF IO domain, program the
 * per-voltage bit in UHS_REG, and verify the register state after the
 * 5 ms regulator settling time mandated by the SD spec.  Only valid on
 * controller version >= 2.40a.
 */
1837 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1838 						struct mmc_ios *ios)
1841 	unsigned int value,uhs_reg;
1844 	 * Signal Voltage Switching is only applicable for Host Controllers
1847 	if (host->verid < DW_MMC_240A)
1850 	uhs_reg = mci_readl(host, UHS_REG);
1851 	MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1852 		__LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1854 	switch (ios->signal_voltage) {
1855 	case MMC_SIGNAL_VOLTAGE_330:
1856 		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1858 			ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1859 			/* regulator_put(host->vmmc); //to be done in remove function. */
1861 			MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d  \n",
1862 				__func__, regulator_get_voltage(host->vmmc), ret);
1864 				MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1865 						" failed\n", mmc_hostname(host->mmc));
/* Move the SoC IO pad domain to 3.3V to match the regulator. */
1868 		dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1870 		MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1871 			__FUNCTION__, mmc_hostname(host->mmc));
1873 		/* set High-power mode */
1874 		value = mci_readl(host, CLKENA);
1875 		value &= ~SDMMC_CLKEN_LOW_PWR;
1876 		mci_writel(host,CLKENA , value);
/* Clear the 1.8V enable bit to select 3.3V signalling. */
1878 		uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1879 		mci_writel(host,UHS_REG , uhs_reg);
1882 		usleep_range(5000, 5500);
1884 		/* 3.3V regulator output should be stable within 5 ms */
1885 		uhs_reg = mci_readl(host, UHS_REG);
1886 		if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1889 		MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1890 				mmc_hostname(host->mmc));
1893 	case MMC_SIGNAL_VOLTAGE_180:
1895 			ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1896 			/* regulator_put(host->vmmc);//to be done in remove function. */
1898 			MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s   =%dmV set 1.8end, ret=%d . \n",
1899 				__LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1901 				MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1902 						" failed\n", mmc_hostname(host->mmc));
1905 		dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1909 		 * Enable 1.8V Signal Enable in the Host Control2
1912 		mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1915 		usleep_range(5000, 5500);
1916 		MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1917 			__FUNCTION__,mmc_hostname(host->mmc));
1919 		/* 1.8V regulator output should be stable within 5 ms */
1920 		uhs_reg = mci_readl(host, UHS_REG);
1921 		if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1924 		MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1925 				mmc_hostname(host->mmc));
1928 	case MMC_SIGNAL_VOLTAGE_120:
1930 			ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1932 				MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1933 						" failed\n", mmc_hostname(host->mmc));
1939 		/* No signal voltage switch required */
/*
 * mmc_host_ops .start_signal_voltage_switch: thin wrapper that bails
 * out on pre-2.40a controllers and otherwise delegates to
 * dw_mci_do_start_signal_voltage_switch().
 */
1945 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1946 	struct mmc_ios *ios)
1948 	struct dw_mci_slot *slot = mmc_priv(mmc);
1949 	struct dw_mci *host = slot->host;
1952 	if (host->verid < DW_MMC_240A)
1955 	err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * mmc_host_ops .execute_tuning: select the tuning block pattern for
 * CMD19/CMD21 based on bus width, pick the clock controller id for the
 * card type, and delegate the actual phase sweep to the SoC glue's
 * execute_tuning hook.  Skipped on RK3036/RK312x which lack a 1.8V IO
 * domain.
 */
1961 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1963 	struct dw_mci_slot *slot = mmc_priv(mmc);
1964 	struct dw_mci *host = slot->host;
1965 	const struct dw_mci_drv_data *drv_data = host->drv_data;
1966 	struct dw_mci_tuning_data tuning_data;
1969 	/* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1970 	if(cpu_is_rk3036() || cpu_is_rk312x())
1973 	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1974 		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1975 			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1976 			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1977 		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1978 			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1979 			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1983 	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
1984 		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1985 		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1988 			"Undefined command(%d) for tuning\n", opcode);
1993 	/* Recommend sample phase and delayline
1994 	   Fixme: Mix-use these three controllers will cause
/* con_id selects which clock controller's phase is tuned per card type. */
1997 	if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1998 		tuning_data.con_id = 3;
1999 	else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2000 		tuning_data.con_id = 1;
2002 		tuning_data.con_id = 0;
2004 	/* 0: driver, from host->devices
2005 	   1: sample, from devices->host
2007 	tuning_data.tuning_type = 1;
2009 	if (drv_data && drv_data->execute_tuning)
2010 		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/*
 * mmc_host_ops .post_tmo: after a request timeout, drop the current
 * request pointer and force the state machine back to idle so the next
 * request can start cleanly.
 */
2015 static void dw_mci_post_tmo(struct mmc_host *mmc)
2017 	struct dw_mci_slot *slot = mmc_priv(mmc);
2018 	struct dw_mci *host = slot->host;
2019 	host->cur_slot->mrq = NULL;
2021 	host->state = STATE_IDLE;
/* Host operations exposed to the MMC core for this controller. */
2024 static const struct mmc_host_ops dw_mci_ops = {
2025 	.request		= dw_mci_request,
2026 	.pre_req		= dw_mci_pre_req,
2027 	.post_req		= dw_mci_post_req,
2028 	.set_ios		= dw_mci_set_ios,
2029 	.get_ro			= dw_mci_get_ro,
2030 	.get_cd			= dw_mci_get_cd,
2031 	.set_sdio_status	= dw_mci_set_sdio_status,
2032 	.hw_reset		= dw_mci_hw_reset,
2033 	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
2034 	.execute_tuning		= dw_mci_execute_tuning,
2035 	.post_tmo		= dw_mci_post_tmo,
2036 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
2037 	.start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
2038 	.card_busy	= dw_mci_card_busy,
/*
 * Enable or disable the host IRQ line, tracking the current state in
 * host->irq_state so repeated calls with the same flag are no-ops
 * (avoids unbalanced enable_irq/disable_irq nesting).
 */
2043 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
2045 	unsigned long flags;
2050 	local_irq_save(flags);
2051 	if(host->irq_state != irqflag)
2053 		host->irq_state = irqflag;
2056 			enable_irq(host->irq);
2060 			disable_irq(host->irq);
2063 	local_irq_restore(flags);
/*
 * Finalize the data phase of a request: for host->card transfers
 * (except CMD19 bus-test) translate the latched data_status interrupt
 * bits into data->error, then wait for the controller to leave the
 * busy state before the request completes.
 */
2067 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2068 	__releases(&host->lock)
2069 	__acquires(&host->lock)
2071 	if(DW_MCI_SEND_STATUS == host->dir_status){
2073 		if( MMC_BUS_TEST_W != host->cmd->opcode){
2074 			if(host->data_status & SDMMC_INT_DCRC)
2075 				host->data->error = -EILSEQ;
2076 			else if(host->data_status & SDMMC_INT_EBE)
2077 				host->data->error = -ETIMEDOUT;
2079 			dw_mci_wait_unbusy(host);
2082 		dw_mci_wait_unbusy(host);
/*
 * Complete the current request and advance the host: finalize the data
 * phase, clear cur_slot->mrq, start the next queued slot (or go idle),
 * then call mmc_request_done() with host->lock dropped (the core may
 * immediately submit a new request re-entering this driver).
 */
2087 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2088 	__releases(&host->lock)
2089 	__acquires(&host->lock)
2091 	struct dw_mci_slot *slot;
2092 	struct mmc_host	*prev_mmc = host->cur_slot->mmc;
2094 	//WARN_ON(host->cmd || host->data);
2096 	dw_mci_deal_data_end(host, mrq);
2099 		MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2100 			mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2102 		MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2103 			mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2105 	host->cur_slot->mrq = NULL;
2107 	if (!list_empty(&host->queue)) {
2108 		slot = list_entry(host->queue.next,
2109 				  struct dw_mci_slot, queue_node);
2110 		list_del(&slot->queue_node);
2111 		dev_vdbg(host->dev, "list not empty: %s is next\n",
2112 			 mmc_hostname(slot->mmc));
2113 		host->state = STATE_SENDING_CMD;
2114 		MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2115 		dw_mci_start_request(host, slot);
2117 		dev_vdbg(host->dev, "list empty\n");
2118 		host->state = STATE_IDLE;
/* Drop the lock across the core callback to avoid re-entrancy deadlock. */
2121 	spin_unlock(&host->lock);
2122 	mmc_request_done(prev_mmc, mrq);
2123 	spin_lock(&host->lock);
/*
 * Consume a completed command: read the (possibly 136-bit) response
 * from RESP0..RESP3 and translate the latched cmd_status interrupt
 * bits into cmd->error (-ETIMEDOUT for response timeout, -EILSEQ for
 * CRC errors).  Note RESP registers hold R2 responses in reverse word
 * order, hence resp[3]=RESP0 ... resp[0]=RESP3.
 */
2126 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2128 	u32 status = host->cmd_status;
2130 	host->cmd_status = 0;
2132 	/* Read the response from the card (up to 16 bytes) */
2133 	if (cmd->flags & MMC_RSP_PRESENT) {
2134 		if (cmd->flags & MMC_RSP_136) {
2135 			cmd->resp[3] = mci_readl(host, RESP0);
2136 			cmd->resp[2] = mci_readl(host, RESP1);
2137 			cmd->resp[1] = mci_readl(host, RESP2);
2138 			cmd->resp[0] = mci_readl(host, RESP3);
2140 			MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2141 				__LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2143 			cmd->resp[0] = mci_readl(host, RESP0);
2147 			MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2148 				__LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
2152 	if (status & SDMMC_INT_RTO)
2154 		if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2157 		cmd->error = -ETIMEDOUT;
2158 	}else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2159 		cmd->error = -EILSEQ;
2160 	}else if (status & SDMMC_INT_RESP_ERR){
2165 		MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2166 			cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* Suppress repeated RTO logging for CMD13 polling storms. */
2169 		if(MMC_SEND_STATUS != cmd->opcode)
2170 			if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2171 				MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2172 					cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2176 		/* newer ip versions need a delay between retries */
2177 		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine driven by interrupt events.  Loops while
 * the state keeps advancing, handling the phases of a request:
 *   STATE_SENDING_CMD  -> command done (may chain sbc -> data cmd)
 *   STATE_SENDING_DATA -> transfer done or data error recovery
 *   STATE_DATA_BUSY    -> data phase complete, classify errors
 *   STATE_SENDING_STOP -> CMD12/stop complete
 *   STATE_DATA_ERROR   -> wait out the failed transfer
 * Runs entirely under host->lock (dropped only inside
 * dw_mci_request_end when calling back into the core).
 */
2183 static void dw_mci_tasklet_func(unsigned long priv)
2185 	struct dw_mci *host = (struct dw_mci *)priv;
2186 	struct dw_mci_slot *slot = mmc_priv(host->mmc);
2187 	struct mmc_data	*data;
2188 	struct mmc_command *cmd;
2189 	enum dw_mci_state state;
2190 	enum dw_mci_state prev_state;
2191 	u32 status, cmd_flags;
2192 	unsigned long timeout = 0;
2195 	spin_lock(&host->lock);
2197 	state = host->state;
2207 		case STATE_SENDING_CMD:
2208 			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2209 						&host->pending_events))
2214 			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2215 			dw_mci_command_complete(host, cmd);
/* sbc (CMD23) finished cleanly: immediately launch the data command. */
2216 			if (cmd == host->mrq->sbc && !cmd->error) {
2217 				prev_state = state = STATE_SENDING_CMD;
2218 				__dw_mci_start_request(host, host->cur_slot,
2223 			if (cmd->data && cmd->error) {
2224 				dw_mci_stop_dma(host);
2227 					send_stop_cmd(host, data);
2228 					state = STATE_SENDING_STOP;
2231 					/* host->data = NULL; */
2234 					send_stop_abort(host, data);
2235 					state = STATE_SENDING_STOP;
2238 				set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2241 			if (!host->mrq->data || cmd->error) {
2242 				dw_mci_request_end(host, host->mrq);
2246 			prev_state = state = STATE_SENDING_DATA;
2249 		case STATE_SENDING_DATA:
2250 			if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2251 				dw_mci_stop_dma(host);
2254 					send_stop_cmd(host, data);
2256 					/*single block read/write, send stop cmd manually to prevent host controller halt*/
2257 					MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2258 						__func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
2260 					mci_writel(host, CMDARG, 0);
2262 					cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2263 						| SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2265 					if(host->mmc->hold_reg_flag)
2266 						cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
/* Issue the raw CMD12 and poll until the controller accepts it. */
2268 					mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
2270 					timeout = jiffies + msecs_to_jiffies(500);
2273 						ret = time_before(jiffies, timeout);
2274 						if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2278 						MMC_DBG_ERR_FUNC(host->mmc,
2279 							"%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2280 							__func__, mmc_hostname(host->mmc));
2283 					send_stop_abort(host, data);
2285 				state = STATE_DATA_ERROR;
2289 			MMC_DBG_CMD_FUNC(host->mmc, 
2290 				"Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2291 				prev_state,state, mmc_hostname(host->mmc));
2293 			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2294 						&host->pending_events))
2296 			MMC_DBG_INFO_FUNC(host->mmc, 
2297 				"Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2298 				prev_state,state,mmc_hostname(host->mmc));
2300 			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2301 			prev_state = state = STATE_DATA_BUSY;
2304 		case STATE_DATA_BUSY:
2305 			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2306 						&host->pending_events))
2309 			dw_mci_deal_data_end(host, host->mrq);
2310 			MMC_DBG_INFO_FUNC(host->mmc, 
2311 				"Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2312 				prev_state,state,mmc_hostname(host->mmc));
2314 			/* host->data = NULL; */
2315 			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2316 			status = host->data_status;
2318 			if (status & DW_MCI_DATA_ERROR_FLAGS) {
2319 				if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2320 					MMC_DBG_ERR_FUNC(host->mmc, 
2321 						"Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2322 						prev_state,state, status, mmc_hostname(host->mmc));
/* Classify the data error: timeout, CRC, or end-bit error on writes. */
2324 				if (status & SDMMC_INT_DRTO) {
2325 					data->error = -ETIMEDOUT;
2326 				} else if (status & SDMMC_INT_DCRC) {
2327 					data->error = -EILSEQ;
2328 				} else if (status & SDMMC_INT_EBE &&
2329 					host->dir_status == DW_MCI_SEND_STATUS){
2331 					 * No data CRC status was returned.
2332 					 * The number of bytes transferred will
2333 					 * be exaggerated in PIO mode.
2335 					data->bytes_xfered = 0;
2336 					data->error = -ETIMEDOUT;
2345 				 * After an error, there may be data lingering
2346 				 * in the FIFO, so reset it - doing so
2347 				 * generates a block interrupt, hence setting
2348 				 * the scatter-gather pointer to NULL.
2350 				dw_mci_fifo_reset(host);
2352 				data->bytes_xfered = data->blocks * data->blksz;
2357 				MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2358 					prev_state,state,mmc_hostname(host->mmc));
2359 				dw_mci_request_end(host, host->mrq);
2362 			MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2363 				prev_state,state,mmc_hostname(host->mmc));
/* With sbc (CMD23) the card stops automatically; no CMD12 needed. */
2365 			if (host->mrq->sbc && !data->error) {
2366 				data->stop->error = 0;
2368 				MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2369 					prev_state,state,mmc_hostname(host->mmc));
2371 				dw_mci_request_end(host, host->mrq);
2375 			prev_state = state = STATE_SENDING_STOP;
2377 				send_stop_cmd(host, data);
2379 			if (data->stop && !data->error) {
2380 				/* stop command for open-ended transfer*/
2382 				send_stop_abort(host, data);
2386 			MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2387 				prev_state,state,mmc_hostname(host->mmc));
2389 		case STATE_SENDING_STOP:
2390 			if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2393 			MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2394 				prev_state, state, mmc_hostname(host->mmc));
2396 			/* CMD error in data command */
2397 			if (host->mrq->cmd->error && host->mrq->data) {
2398 				dw_mci_fifo_reset(host);
2402 			host->data = NULL; */
2404 				dw_mci_command_complete(host, host->mrq->stop);
2406 			if (host->mrq->stop)
2407 				dw_mci_command_complete(host, host->mrq->stop);
2409 				host->cmd_status = 0;
2412 			dw_mci_request_end(host, host->mrq);
2415 		case STATE_DATA_ERROR:
2416 			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2417 						&host->pending_events))
2420 			state = STATE_DATA_BUSY;
/* Loop until no state transition happened in a full pass. */
2423 	} while (state != prev_state);
2425 	host->state = state;
2427 	spin_unlock(&host->lock);
2431 /* push final bytes to part_buf, only use during push */
/* Stash cnt trailing bytes (< one FIFO word) until the next push. */
2432 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2434 	memcpy((void *)&host->part_buf, buf, cnt);
2435 	host->part_buf_count = cnt;
2438 /* append bytes to part_buf, only use during push */
/* Top up part_buf toward one full FIFO word (1 << data_shift bytes). */
2439 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2441 	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2442 	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2443 	host->part_buf_count += cnt;
2447 /* pull first bytes from part_buf, only use during pull */
/* Drain up to cnt previously-buffered bytes into the caller's buffer. */
2448 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2450 	cnt = min(cnt, (int)host->part_buf_count);
2452 		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2454 		host->part_buf_count -= cnt;
2455 		host->part_buf_start += cnt;
2460 /* pull final bytes from the part_buf, assuming it's just been filled */
/* Take cnt bytes now; remember the rest of the word for later pulls. */
2461 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2463 	memcpy(buf, &host->part_buf, cnt);
2464 	host->part_buf_start = cnt;
2465 	host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO push for a 16-bit FIFO: flush any partial word carried over from
 * the previous call, stream whole 16-bit words (bouncing through an
 * aligned buffer when the source pointer is odd and unaligned access
 * is not available), and stash the trailing odd byte in part_buf —
 * flushing it immediately when it completes the expected transfer
 * length.
 */
2468 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2470 	struct mmc_data *data = host->data;
2473 	/* try and push anything in the part_buf */
2474 	if (unlikely(host->part_buf_count)) {
2475 		int len = dw_mci_push_part_bytes(host, buf, cnt);
2478 		if (host->part_buf_count == 2) {
2479 			mci_writew(host, DATA(host->data_offset),
2481 			host->part_buf_count = 0;
2484 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2485 	if (unlikely((unsigned long)buf & 0x1)) {
2487 			u16 aligned_buf[64];
2488 			int len = min(cnt & -2, (int)sizeof(aligned_buf));
2489 			int items = len >> 1;
2491 			/* memcpy from input buffer into aligned buffer */
2492 			memcpy(aligned_buf, buf, len);
2495 			/* push data from aligned buffer into fifo */
2496 			for (i = 0; i < items; ++i)
2497 				mci_writew(host, DATA(host->data_offset),
2504 		for (; cnt >= 2; cnt -= 2)
2505 			mci_writew(host, DATA(host->data_offset), *pdata++);
2508 	/* put anything remaining in the part_buf */
2510 		dw_mci_set_part_bytes(host, buf, cnt);
2511 		/* Push data if we have reached the expected data length */
2512 		if ((data->bytes_xfered + init_cnt) ==
2513 		    (data->blksz * data->blocks))
2514 			mci_writew(host, DATA(host->data_offset),
/*
 * PIO read path for a 16-bit-wide FIFO: pull @cnt bytes from the data
 * FIFO into @buf two bytes at a time.  A trailing odd byte count is
 * satisfied by reading one more FIFO word into host->part_buf and
 * handing out only the needed bytes (dw_mci_pull_final_bytes).
 */
2519 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
/* Bounce through an aligned buffer when @buf is misaligned for u16. */
2521 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2522 if (unlikely((unsigned long)buf & 0x1)) {
2524 /* pull data from fifo into aligned buffer */
2525 u16 aligned_buf[64];
2526 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2527 int items = len >> 1;
2529 for (i = 0; i < items; ++i)
2530 aligned_buf[i] = mci_readw(host,
2531 DATA(host->data_offset));
2532 /* memcpy from aligned buffer into output buffer */
2533 memcpy(buf, aligned_buf, len);
/* Fast path: buffer is aligned, read u16 words directly. */
2541 for (; cnt >= 2; cnt -= 2)
2542 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Odd tail: read one full word, keep the unused bytes buffered. */
2546 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2547 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO write path for a 32-bit-wide FIFO: same structure as
 * dw_mci_push_data16 but operating on 4-byte FIFO words.
 */
2551 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2553 struct mmc_data *data = host->data;
/* First flush any bytes left over from an earlier partial push. */
2556 /* try and push anything in the part_buf */
2557 if (unlikely(host->part_buf_count)) {
2558 int len = dw_mci_push_part_bytes(host, buf, cnt);
2561 if (host->part_buf_count == 4) {
2562 mci_writel(host, DATA(host->data_offset),
2564 host->part_buf_count = 0;
/* Bounce through an aligned buffer when @buf is misaligned for u32. */
2567 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2568 if (unlikely((unsigned long)buf & 0x3)) {
2570 u32 aligned_buf[32];
2571 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2572 int items = len >> 2;
2574 /* memcpy from input buffer into aligned buffer */
2575 memcpy(aligned_buf, buf, len);
2578 /* push data from aligned buffer into fifo */
2579 for (i = 0; i < items; ++i)
2580 mci_writel(host, DATA(host->data_offset),
/* Fast path: buffer is aligned, write u32 words directly. */
2587 for (; cnt >= 4; cnt -= 4)
2588 mci_writel(host, DATA(host->data_offset), *pdata++);
2591 /* put anything remaining in the part_buf */
2593 dw_mci_set_part_bytes(host, buf, cnt);
2594 /* Push data if we have reached the expected data length */
2595 if ((data->bytes_xfered + init_cnt) ==
2596 (data->blksz * data->blocks))
2597 mci_writel(host, DATA(host->data_offset),
/*
 * PIO read path for a 32-bit-wide FIFO: same structure as
 * dw_mci_pull_data16 but operating on 4-byte FIFO words.
 */
2602 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
/* Bounce through an aligned buffer when @buf is misaligned for u32. */
2604 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2605 if (unlikely((unsigned long)buf & 0x3)) {
2607 /* pull data from fifo into aligned buffer */
2608 u32 aligned_buf[32];
2609 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2610 int items = len >> 2;
2612 for (i = 0; i < items; ++i)
2613 aligned_buf[i] = mci_readl(host,
2614 DATA(host->data_offset));
2615 /* memcpy from aligned buffer into output buffer */
2616 memcpy(buf, aligned_buf, len);
/* Fast path: buffer is aligned, read u32 words directly. */
2624 for (; cnt >= 4; cnt -= 4)
2625 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Sub-word tail: read one full word, keep the unused bytes buffered. */
2629 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2630 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO write path for a 64-bit-wide FIFO: same structure as
 * dw_mci_push_data16/32 but operating on 8-byte FIFO words.
 */
2634 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2636 struct mmc_data *data = host->data;
/* First flush any bytes left over from an earlier partial push. */
2639 /* try and push anything in the part_buf */
2640 if (unlikely(host->part_buf_count)) {
2641 int len = dw_mci_push_part_bytes(host, buf, cnt);
2645 if (host->part_buf_count == 8) {
2646 mci_writeq(host, DATA(host->data_offset),
2648 host->part_buf_count = 0;
/* Bounce through an aligned buffer when @buf is misaligned for u64. */
2651 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2652 if (unlikely((unsigned long)buf & 0x7)) {
2654 u64 aligned_buf[16];
2655 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2656 int items = len >> 3;
2658 /* memcpy from input buffer into aligned buffer */
2659 memcpy(aligned_buf, buf, len);
2662 /* push data from aligned buffer into fifo */
2663 for (i = 0; i < items; ++i)
2664 mci_writeq(host, DATA(host->data_offset),
/* Fast path: buffer is aligned, write u64 words directly. */
2671 for (; cnt >= 8; cnt -= 8)
2672 mci_writeq(host, DATA(host->data_offset), *pdata++);
2675 /* put anything remaining in the part_buf */
2677 dw_mci_set_part_bytes(host, buf, cnt);
2678 /* Push data if we have reached the expected data length */
2679 if ((data->bytes_xfered + init_cnt) ==
2680 (data->blksz * data->blocks))
2681 mci_writeq(host, DATA(host->data_offset),
/*
 * PIO read path for a 64-bit-wide FIFO: same structure as
 * dw_mci_pull_data16/32 but operating on 8-byte FIFO words.
 */
2686 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
/* Bounce through an aligned buffer when @buf is misaligned for u64. */
2688 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2689 if (unlikely((unsigned long)buf & 0x7)) {
2691 /* pull data from fifo into aligned buffer */
2692 u64 aligned_buf[16];
2693 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2694 int items = len >> 3;
2696 for (i = 0; i < items; ++i)
2697 aligned_buf[i] = mci_readq(host,
2698 DATA(host->data_offset));
2699 /* memcpy from aligned buffer into output buffer */
2700 memcpy(buf, aligned_buf, len);
/* Fast path: buffer is aligned, read u64 words directly. */
2708 for (; cnt >= 8; cnt -= 8)
2709 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* Sub-word tail: read one full word, keep the unused bytes buffered. */
2713 host->part_buf = mci_readq(host, DATA(host->data_offset));
2714 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Generic PIO pull entry point: first satisfy as much of the request
 * as possible from the partial-word buffer, then delegate the
 * remainder to the width-specific handler (host->pull_data is one of
 * dw_mci_pull_data16/32/64, chosen at probe time).
 */
2718 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2722 /* get remaining partial bytes */
2723 len = dw_mci_pull_part_bytes(host, buf, cnt);
/* If the partial buffer covered everything, nothing to read from FIFO. */
2724 if (unlikely(len == cnt))
2729 /* get the rest of the data */
2730 host->pull_data(host, buf, cnt);
/*
 * PIO receive handler, called from interrupt context on RXDR (FIFO
 * watermark) and on data-over (@dto = true, which forces draining of
 * any residual FIFO contents).  Walks the request's scatterlist via
 * host->sg_miter and pulls whatever the FIFO currently holds.
 * Sets EVENT_XFER_COMPLETE when the scatterlist is exhausted.
 */
2733 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2735 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2737 unsigned int offset;
2738 struct mmc_data *data = host->data;
2739 int shift = host->data_shift;
2742 unsigned int remain, fcnt;
/* Guard against a host whose last bus reference has been dropped. */
2744 if(!host->mmc->bus_refs){
2745 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2749 if (!sg_miter_next(sg_miter))
2752 host->sg = sg_miter->piter.sg;
2753 buf = sg_miter->addr;
2754 remain = sg_miter->length;
/* Bytes available = FIFO fill level (in words) plus buffered partial. */
2758 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2759 << shift) + host->part_buf_count;
2760 len = min(remain, fcnt);
2763 dw_mci_pull_data(host, (void *)(buf + offset), len);
2764 data->bytes_xfered += len;
2769 sg_miter->consumed = offset;
/* Re-check and ack RXDR; loop while more data is pending (or, on DTO,
 * while the FIFO still reports a non-zero fill count). */
2770 status = mci_readl(host, MINTSTS);
2771 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2772 /* if the RXDR is ready read again */
2773 } while ((status & SDMMC_INT_RXDR) ||
2774 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2777 if (!sg_miter_next(sg_miter))
2779 sg_miter->consumed = 0;
2781 sg_miter_stop(sg_miter);
2785 sg_miter_stop(sg_miter);
/* Scatterlist fully consumed: mark transfer complete for the tasklet. */
2789 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO transmit handler, called from interrupt context on TXDR (FIFO
 * watermark).  Mirror image of dw_mci_read_data_pio: walks the
 * request's scatterlist and pushes as many bytes as the FIFO has room
 * for.  Sets EVENT_XFER_COMPLETE when the scatterlist is exhausted.
 */
2792 static void dw_mci_write_data_pio(struct dw_mci *host)
2794 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2796 unsigned int offset;
2797 struct mmc_data *data = host->data;
2798 int shift = host->data_shift;
2801 unsigned int fifo_depth = host->fifo_depth;
2802 unsigned int remain, fcnt;
/* Guard against a host whose last bus reference has been dropped. */
2804 if(!host->mmc->bus_refs){
2805 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2810 if (!sg_miter_next(sg_miter))
2813 host->sg = sg_miter->piter.sg;
2814 buf = sg_miter->addr;
2815 remain = sg_miter->length;
/* Free space = (depth - fill level) in words, minus buffered partial. */
2819 fcnt = ((fifo_depth -
2820 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2821 << shift) - host->part_buf_count;
2822 len = min(remain, fcnt);
2825 host->push_data(host, (void *)(buf + offset), len);
2826 data->bytes_xfered += len;
2831 sg_miter->consumed = offset;
/* Re-check and ack TXDR; loop while the FIFO wants more data. */
2832 status = mci_readl(host, MINTSTS);
2833 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2834 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2837 if (!sg_miter_next(sg_miter))
2839 sg_miter->consumed = 0;
2841 sg_miter_stop(sg_miter);
2845 sg_miter_stop(sg_miter);
/* Scatterlist fully consumed: mark transfer complete for the tasklet. */
2849 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Record a command-done interrupt status and kick the state-machine
 * tasklet.  host->cmd_status is only written if still clear, so an
 * earlier error status (set by the error path in dw_mci_interrupt) is
 * not overwritten by the subsequent CMD_DONE.
 */
2852 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2854 if (!host->cmd_status)
2855 host->cmd_status = status;
2862 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2863 tasklet_schedule(&host->tasklet);
/*
 * Top-level interrupt handler.  Reads the masked interrupt status
 * (MINTSTS), acks each handled source in RINTSTS, records status for
 * the tasklet-driven state machine, and services PIO data movement
 * inline.  Also dispatches SDIO card interrupts per slot and, for SoCs
 * using the internal IDMAC, acks/forwards DMA-completion interrupts.
 * NOTE(review): ordering here is load-bearing (error flags must be
 * latched before DATA_OVER/CMD_DONE are processed) — do not reorder.
 */
2866 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2868 struct dw_mci *host = dev_id;
2869 u32 pending, sdio_int;
2872 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
/* Hardware erratum workaround: synthesize DATA_OVER on affected IP. */
2875 * DTO fix - version 2.10a and below, and only if internal DMA
2878 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2880 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2881 pending |= SDMMC_INT_DATA_OVER;
2885 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2886 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2887 host->cmd_status = pending;
2889 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2890 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2892 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2895 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2896 /* if there is an error report DATA_ERROR */
2897 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2898 host->data_status = pending;
2900 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2902 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2903 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2904 tasklet_schedule(&host->tasklet);
2907 if (pending & SDMMC_INT_DATA_OVER) {
2908 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2909 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
/* Don't clobber a previously latched error status. */
2910 if (!host->data_status)
2911 host->data_status = pending;
/* On a PIO read, drain whatever is left in the FIFO before completing. */
2913 if (host->dir_status == DW_MCI_RECV_STATUS) {
2914 if (host->sg != NULL)
2915 dw_mci_read_data_pio(host, true);
2917 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2918 tasklet_schedule(&host->tasklet);
2921 if (pending & SDMMC_INT_RXDR) {
2922 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2923 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2924 dw_mci_read_data_pio(host, false);
2927 if (pending & SDMMC_INT_TXDR) {
2928 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2929 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2930 dw_mci_write_data_pio(host);
/* Voltage-switch interrupt (Rockchip extension for UHS signaling). */
2933 if (pending & SDMMC_INT_VSI) {
2934 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2935 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2936 dw_mci_cmd_interrupt(host, pending);
2939 if (pending & SDMMC_INT_CMD_DONE) {
2940 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2941 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2942 dw_mci_cmd_interrupt(host, pending);
/* Card-detect: hold a wake lock and defer to the card workqueue. */
2945 if (pending & SDMMC_INT_CD) {
2946 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2947 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
2948 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2949 queue_work(host->card_workqueue, &host->card_work);
2952 if (pending & SDMMC_INT_HLE) {
2953 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2954 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2958 /* Handle SDIO Interrupts */
2959 for (i = 0; i < host->num_slots; i++) {
2960 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in controller version 2.40a. */
2962 if (host->verid < DW_MMC_240A)
2963 sdio_int = SDMMC_INT_SDIO(i);
2965 sdio_int = SDMMC_INT_SDIO(i + 8);
2967 if (pending & sdio_int) {
2968 mci_writel(host, RINTSTS, sdio_int);
2969 mmc_signal_sdio_irq(slot->mmc);
2975 #ifdef CONFIG_MMC_DW_IDMAC
2976 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2977 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2978 /* Handle DMA interrupts */
2979 pending = mci_readl(host, IDSTS);
2980 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2981 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2982 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2983 host->dma_ops->complete((void *)host);
2991 static void dw_mci_work_routine_card(struct work_struct *work)
2993 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2996 for (i = 0; i < host->num_slots; i++) {
2997 struct dw_mci_slot *slot = host->slot[i];
2998 struct mmc_host *mmc = slot->mmc;
2999 struct mmc_request *mrq;
3002 present = dw_mci_get_cd(mmc);
3004 /* Card insert, switch data line to uart function, and vice verse.
3005 eONLY audi chip need switched by software, using udbg tag in dts!
3007 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
3009 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3010 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3011 mmc_hostname(host->mmc));
3013 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3014 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3015 mmc_hostname(host->mmc));
3019 while (present != slot->last_detect_state) {
3020 dev_dbg(&slot->mmc->class_dev, "card %s\n",
3021 present ? "inserted" : "removed");
3022 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
3023 present ? "inserted" : "removed.", mmc_hostname(mmc));
3025 dw_mci_ctrl_all_reset(host);
3026 /* Stop edma when rountine card triggered */
3027 if(cpu_is_rk3036() || cpu_is_rk312x())
3028 if(host->dma_ops && host->dma_ops->stop)
3029 host->dma_ops->stop(host);
3030 rk_send_wakeup_key();//wake up system
3031 spin_lock_bh(&host->lock);
3033 /* Card change detected */
3034 slot->last_detect_state = present;
3036 /* Clean up queue if present */
3039 if (mrq == host->mrq) {
3043 switch (host->state) {
3046 case STATE_SENDING_CMD:
3047 mrq->cmd->error = -ENOMEDIUM;
3051 case STATE_SENDING_DATA:
3052 mrq->data->error = -ENOMEDIUM;
3053 dw_mci_stop_dma(host);
3055 case STATE_DATA_BUSY:
3056 case STATE_DATA_ERROR:
3057 if (mrq->data->error == -EINPROGRESS)
3058 mrq->data->error = -ENOMEDIUM;
3062 case STATE_SENDING_STOP:
3063 mrq->stop->error = -ENOMEDIUM;
3067 dw_mci_request_end(host, mrq);
3069 list_del(&slot->queue_node);
3070 mrq->cmd->error = -ENOMEDIUM;
3072 mrq->data->error = -ENOMEDIUM;
3074 mrq->stop->error = -ENOMEDIUM;
3076 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3077 mrq->cmd->opcode, mmc_hostname(mmc));
3079 spin_unlock(&host->lock);
3080 mmc_request_done(slot->mmc, mrq);
3081 spin_lock(&host->lock);
3085 /* Power down slot */
3087 /* Clear down the FIFO */
3088 dw_mci_fifo_reset(host);
3089 #ifdef CONFIG_MMC_DW_IDMAC
3090 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3091 dw_mci_idmac_reset(host);
3096 spin_unlock_bh(&host->lock);
3098 present = dw_mci_get_cd(mmc);
3101 mmc_detect_change(slot->mmc,
3102 msecs_to_jiffies(host->pdata->detect_delay_ms));
3107 /* given a slot id, find out the device node representing that slot */
/*
 * Walks the controller's DT children and matches the child whose
 * "reg" property equals @slot.  Returns NULL when the device has no
 * of_node (and, presumably, when no child matches — the tail of the
 * loop is outside this view).
 */
3108 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3110 struct device_node *np;
3114 if (!dev || !dev->of_node)
3117 for_each_child_of_node(dev->of_node, np) {
3118 addr = of_get_property(np, "reg", &len);
3119 if (!addr || (len < sizeof(int)))
3121 if (be32_to_cpup(addr) == slot)
/* Table mapping per-slot DT property names to DW_MCI slot quirk bits. */
3127 static struct dw_mci_of_slot_quirks {
3130 } of_slot_quirks[] = {
3132 .quirk = "disable-wp",
3133 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Collect the quirk bits whose DT properties are present on the given
 * slot's device node (see of_slot_quirks above).
 */
3137 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3139 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3144 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3145 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3146 quirks |= of_slot_quirks[idx].id;
3151 /* find out bus-width for a given slot */
/*
 * Reads "bus-width" from the controller node (the Rockchip fork reads
 * it from dev->of_node rather than the per-slot child — see the
 * commented-out call).  On failure logs an error; the fallback width
 * is assigned outside this view.
 */
3152 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3154 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3160 if (of_property_read_u32(np, "bus-width", &bus_wd))
3161 dev_err(dev, "bus-width property not found, assuming width"
3167 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Looks up "pwr-gpios" on the controller node, requests it via devm,
 * and drives it low (power enabled, per the comment at 3187).
 */
3168 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3170 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3176 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3178 /* Having a missing entry is valid; return silently */
3179 if (!gpio_is_valid(gpio))
3182 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3183 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3187 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3193 /* find the write protect gpio for a given slot; or -1 if none specified */
/*
 * Looks up "wp-gpios" on the slot's child node and requests it via
 * devm; returns the gpio number (return paths outside this view).
 */
3194 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3196 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3202 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3204 /* Having a missing entry is valid; return silently */
3205 if (!gpio_is_valid(gpio))
3208 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3209 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3216 /* find the cd gpio for a given slot */
/*
 * Looks up "cd-gpios" on the controller node and registers it with the
 * MMC core's slot-gpio card-detect helper (mmc_gpio_request_cd).
 */
3217 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3218 struct mmc_host *mmc)
3220 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3226 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3228 /* Having a missing entry is valid; return silently */
3229 if (!gpio_is_valid(gpio))
3232 if (mmc_gpio_request_cd(mmc, gpio, 0))
3233 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * Threaded IRQ handler for the card-detect GPIO (used on SoCs where
 * the controller's own CD line is not wired).  Re-arms the trigger for
 * the opposite level, wakes the system, and queues the card workqueue
 * unless rescanning is disabled (suspend path handles that case).
 */
3236 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3238 struct mmc_host *mmc = dev_id;
3239 struct dw_mci_slot *slot = mmc_priv(mmc);
3240 struct dw_mci *host = slot->host;
3241 int gpio_cd = slot->cd_gpio;
/* Level-triggered: flip the trigger polarity to catch the next edge. */
3243 (gpio_get_value(gpio_cd) == 0) ?
3244 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
3245 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
3247 /* wakeup system whether gpio debounce or not */
3248 rk_send_wakeup_key();
3250 /* no need to trigger detect flow when rescan is disabled.
3251 This case happended in dpm, that we just wakeup system and
3252 let suspend_post notify callback handle it.
3254 if(mmc->rescan_disable == 0)
3255 queue_work(host->card_workqueue, &host->card_work);
3257 printk("%s: rescan been disabled!\n", __FUNCTION__);
/*
 * Bind a threaded interrupt (dw_mci_gpio_cd_irqt) to the card-detect
 * GPIO and mark it wakeup-capable so card insertion can resume the
 * system from idle/deep suspend.
 */
3262 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3263 struct mmc_host *mmc)
3265 struct dw_mci_slot *slot = mmc_priv(mmc);
3266 struct dw_mci *host = slot->host;
3270 /* Having a missing entry is valid; return silently */
3271 if (!gpio_is_valid(gpio))
3274 irq = gpio_to_irq(gpio);
3276 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3277 NULL, dw_mci_gpio_cd_irqt,
3278 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
3282 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3284 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3285 enable_irq_wake(irq);
3288 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/*
 * Undo dw_mci_of_set_cd_gpio_irq: release the card-detect GPIO's IRQ
 * and the GPIO itself (used on the init_slot error path).
 */
3292 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3293 struct mmc_host *mmc)
3295 if (!gpio_is_valid(gpio))
3298 if (gpio_to_irq(gpio) >= 0) {
3299 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3300 devm_gpio_free(&mmc->class_dev, gpio);
3303 #else /* CONFIG_OF */
/* No-op stubs used when the kernel is built without devicetree. */
3304 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3308 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3312 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3316 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3320 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3321 struct mmc_host *mmc)
3325 #endif /* CONFIG_OF */
3327 /* @host: dw_mci host prvdata
 * Init pinctrl for each platform. Usually we assign
 * "default" tag for functional usage, "idle" tag for gpio
 * state and "udbg" tag for uart_dbg if any.
 *
 * Lookup order: idle (selected immediately), then default (selected
 * immediately), then, for SD slots only, the optional "udbg" state —
 * selected only while no card is present, since the SD data pins may
 * be shared with the debug UART.  eMMC is skipped entirely.
 */
3332 static void dw_mci_init_pinctrl(struct dw_mci *host)
3334 /* Fixme: DON'T TOUCH EMMC SETTING! */
3335 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3338 /* Get pinctrl for DTS */
3339 host->pinctrl = devm_pinctrl_get(host->dev);
3340 if (IS_ERR(host->pinctrl)) {
3341 dev_err(host->dev, "%s: No pinctrl used!\n",
3342 mmc_hostname(host->mmc));
3346 /* Lookup idle state */
3347 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3348 PINCTRL_STATE_IDLE);
3349 if (IS_ERR(host->pins_idle)) {
3350 dev_err(host->dev, "%s: No idle tag found!\n",
3351 mmc_hostname(host->mmc));
3353 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3354 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3355 mmc_hostname(host->mmc));
3358 /* Lookup default state */
3359 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3360 PINCTRL_STATE_DEFAULT);
3361 if (IS_ERR(host->pins_default)) {
3362 dev_err(host->dev, "%s: No default pinctrl found!\n",
3363 mmc_hostname(host->mmc));
3365 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3366 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3367 mmc_hostname(host->mmc));
3370 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3371 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3372 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3373 if (IS_ERR(host->pins_udbg)) {
3374 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3375 mmc_hostname(host->mmc));
/* Only route pins to the debug UART while no card is inserted. */
3377 if (!dw_mci_get_cd(host->mmc))
3378 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3379 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3380 mmc_hostname(host->mmc));
/*
 * PM notifier for SD slots: disable card rescanning (and cancel any
 * pending detect work) before suspend/hibernate, re-enable it and
 * force a detect pass after resume.  Note @host here is the
 * *mmc_host*, recovered from the embedded pm_notify notifier_block.
 */
3385 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3386 unsigned long mode, void *unused)
3388 struct mmc_host *host = container_of(
3389 notify_block, struct mmc_host, pm_notify);
3390 unsigned long flags;
3393 case PM_HIBERNATION_PREPARE:
3394 case PM_SUSPEND_PREPARE:
3395 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3396 spin_lock_irqsave(&host->lock, flags);
3397 host->rescan_disable = 1;
3398 spin_unlock_irqrestore(&host->lock, flags);
/* Drop the wake lock held by a pending, now-cancelled detect. */
3399 if (cancel_delayed_work(&host->detect))
3400 wake_unlock(&host->detect_wake_lock);
3403 case PM_POST_SUSPEND:
3404 case PM_POST_HIBERNATION:
3405 case PM_POST_RESTORE:
3406 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3407 spin_lock_irqsave(&host->lock, flags);
3408 host->rescan_disable = 0;
3409 spin_unlock_irqrestore(&host->lock, flags);
3410 mmc_detect_change(host, 10);
/*
 * Allocate and register one mmc_host for slot @id: parse DT caps and
 * frequencies, register the PM notifier and gpio-CD interrupt for SD
 * slots, set OCR mask/capabilities/block limits, grab the vmmc
 * regulator and power/WP GPIOs, init pinctrl, and finally
 * mmc_add_host().  Error-unwind labels (err_pm_notifier etc.) are at
 * the bottom.
 */
3416 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3418 struct mmc_host *mmc;
3419 struct dw_mci_slot *slot;
3420 const struct dw_mci_drv_data *drv_data = host->drv_data;
3425 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3429 slot = mmc_priv(mmc);
3433 host->slot[id] = slot;
3436 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3438 mmc->ops = &dw_mci_ops;
/* Clock range: DT "clock-freq-min-max" or driver-wide defaults. */
3440 if (of_property_read_u32_array(host->dev->of_node,
3441 "clock-freq-min-max", freq, 2)) {
3442 mmc->f_min = DW_MCI_FREQ_MIN;
3443 mmc->f_max = DW_MCI_FREQ_MAX;
3445 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3446 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3448 mmc->f_min = freq[0];
3449 mmc->f_max = freq[1];
3451 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3452 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3455 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
/* Rockchip DT extension: each controller instance declares the one
 * card type it serves (sd / sdio / emmc). */
3457 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3458 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3459 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3460 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3461 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3462 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
/* SD only: suppress rescans across suspend via a PM notifier. */
3464 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3465 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3466 if (register_pm_notifier(&mmc->pm_notify)) {
3467 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3468 goto err_pm_notifier;
3472 /* We assume only low-level chip use gpio_cd */
3473 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
3474 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3475 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3476 if (gpio_is_valid(slot->cd_gpio)) {
3477 /* Request gpio int for card detection */
3478 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3480 slot->cd_gpio = -ENODEV;
3481 dev_err(host->dev, "failed to get your cd-gpios!\n");
/* OCR mask: platform callback, else a wide default covering 1.65-3.6V. */
3485 if (host->pdata->get_ocr)
3486 mmc->ocr_avail = host->pdata->get_ocr(id);
3489 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3490 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3491 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3492 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3496 * Start with slot power disabled, it will be enabled when a card
3499 if (host->pdata->setpower)
3500 host->pdata->setpower(id, 0);
3502 if (host->pdata->caps)
3503 mmc->caps = host->pdata->caps;
3505 if (host->pdata->pm_caps)
3506 mmc->pm_caps = host->pdata->pm_caps;
/* Per-controller caps come from the drv_data tables, indexed by the
 * "mshc" alias (DT) or the platform-device id (non-DT). */
3508 if (host->dev->of_node) {
3509 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3513 ctrl_id = to_platform_device(host->dev)->id;
3515 if (drv_data && drv_data->caps)
3516 mmc->caps |= drv_data->caps[ctrl_id];
3517 if (drv_data && drv_data->hold_reg_flag)
3518 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3520 /* set the compatibility of driver. */
3521 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3522 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3524 if (host->pdata->caps2)
3525 mmc->caps2 = host->pdata->caps2;
3527 if (host->pdata->get_bus_wd)
3528 bus_width = host->pdata->get_bus_wd(slot->id);
3529 else if (host->dev->of_node)
3530 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3534 switch (bus_width) {
3536 mmc->caps |= MMC_CAP_8_BIT_DATA;
3538 mmc->caps |= MMC_CAP_4_BIT_DATA;
/* Misc optional DT capability flags. */
3541 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3542 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3543 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3544 mmc->caps |= MMC_CAP_SDIO_IRQ;
3545 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3546 mmc->caps |= MMC_CAP_HW_RESET;
3547 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3548 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3549 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3550 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3551 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3552 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3553 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3554 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3556 /*Assign pm_caps pass to pm_flags*/
3557 mmc->pm_flags = mmc->pm_caps;
/* Block/segment limits: platform-provided, else DMA-mode defaults. */
3559 if (host->pdata->blk_settings) {
3560 mmc->max_segs = host->pdata->blk_settings->max_segs;
3561 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3562 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3563 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3564 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3566 /* Useful defaults if platform data is unset. */
3567 #ifdef CONFIG_MMC_DW_IDMAC
3568 mmc->max_segs = host->ring_size;
3569 mmc->max_blk_size = 65536;
3570 mmc->max_blk_count = host->ring_size;
3571 mmc->max_seg_size = 0x1000;
3572 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3573 if(cpu_is_rk3036() || cpu_is_rk312x()){
3574 /* fixup for external dmac setting */
3576 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3577 mmc->max_blk_count = 65535;
3578 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3579 mmc->max_seg_size = mmc->max_req_size;
3583 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3584 mmc->max_blk_count = 512;
3585 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3586 mmc->max_seg_size = mmc->max_req_size;
3587 #endif /* CONFIG_MMC_DW_IDMAC */
3591 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3593 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* vmmc regulator is only looked up for SD slots. */
3598 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3599 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3603 if (IS_ERR(host->vmmc)) {
3604 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3607 ret = regulator_enable(host->vmmc);
3610 "failed to enable regulator: %d\n", ret);
3617 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
/* SDIO cards are assumed absent until explicitly detected. */
3619 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3620 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3622 dw_mci_init_pinctrl(host);
3623 ret = mmc_add_host(mmc);
3627 #if defined(CONFIG_DEBUG_FS)
3628 dw_mci_init_debugfs(slot);
3631 /* Card initially undetected */
3632 slot->last_detect_state = 1;
/* Error unwind (labels outside this view): undo PM notifier and CD irq. */
3636 unregister_pm_notifier(&mmc->pm_notify);
3639 if (gpio_is_valid(slot->cd_gpio))
3640 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * Tear down one slot: run the platform exit hook, unregister the
 * mmc_host from the core, clear the host's slot pointer, and free it.
 */
3645 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3647 /* Shutdown detect IRQ */
3648 if (slot->host->pdata->exit)
3649 slot->host->pdata->exit(id);
3651 /* Debugfs stuff is cleaned up by mmc core */
3652 mmc_remove_host(slot->mmc);
3653 slot->host->slot[id] = NULL;
3654 mmc_free_host(slot->mmc);
/*
 * Select and initialize the DMA backend: external dmaengine-based
 * edmac on rk3036/rk312x, the controller's internal IDMAC otherwise,
 * falling back to PIO mode if init fails or no ops are available.
 * Also allocates one page of coherent memory for descriptor/sg use.
 */
3657 static void dw_mci_init_dma(struct dw_mci *host)
3659 /* Alloc memory for sg translation */
3660 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3661 &host->sg_dma, GFP_KERNEL);
3662 if (!host->sg_cpu) {
3663 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3668 memset(host->sg_cpu, 0, PAGE_SIZE);
3671 /* Determine which DMA interface to use */
3672 #if defined(CONFIG_MMC_DW_IDMAC)
3673 if(cpu_is_rk3036() || cpu_is_rk312x()){
3674 host->dma_ops = &dw_mci_edmac_ops;
3675 dev_info(host->dev, "Using external DMA controller.\n");
3677 host->dma_ops = &dw_mci_idmac_ops;
3678 dev_info(host->dev, "Using internal DMA controller.\n");
/* A usable backend must provide init/start/stop/cleanup. */
3685 if (host->dma_ops->init && host->dma_ops->start &&
3686 host->dma_ops->stop && host->dma_ops->cleanup) {
3687 if (host->dma_ops->init(host)) {
3688 dev_err(host->dev, "%s: Unable to initialize "
3689 "DMA Controller.\n", __func__);
3693 dev_err(host->dev, "DMA initialization not found.\n");
3701 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Assert the given reset bit(s) in CTRL and poll (up to 500ms) for the
 * hardware to self-clear them.  Returns true on success, false on
 * timeout (the timeout message at 3723 is on the failure path).
 */
3706 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3708 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3711 ctrl = mci_readl(host, CTRL);
3713 mci_writel(host, CTRL, ctrl);
3715 /* wait till resets clear */
3717 ctrl = mci_readl(host, CTRL);
3718 if (!(ctrl & reset))
3720 } while (time_before(jiffies, timeout));
3723 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * Reset only the data FIFO.  The scatterlist iterator is stopped first
 * because the reset raises an interrupt (see comment at 3732) and the
 * PIO path must not keep walking a now-stale sg mapping.
 */
3729 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3732 * Reseting generates a block interrupt, hence setting
3733 * the scatter-gather pointer to NULL.
3736 sg_miter_stop(&host->sg_miter);
3740 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full controller reset: FIFO, controller and DMA reset bits at once. */
3743 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3745 return dw_mci_ctrl_reset(host,
3746 SDMMC_CTRL_FIFO_RESET |
3748 SDMMC_CTRL_DMA_RESET);
/* Table mapping controller-level DT property names to host quirk bits. */
3753 static struct dw_mci_of_quirks {
3758 .quirk = "broken-cd",
3759 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board platform-data structure from the device tree:
 * slot count, quirks, FIFO depth, detect delay, bus clock, PM/caps
 * flags, plus SoC-specific parsing via drv_data->parse_dt.  Returns
 * ERR_PTR on allocation or parse failure.
 */
3763 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3765 struct dw_mci_board *pdata;
3766 struct device *dev = host->dev;
3767 struct device_node *np = dev->of_node;
3768 const struct dw_mci_drv_data *drv_data = host->drv_data;
3770 u32 clock_frequency;
3772 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3774 dev_err(dev, "could not allocate memory for pdata\n");
3775 return ERR_PTR(-ENOMEM);
3778 /* find out number of slots supported */
3779 if (of_property_read_u32(dev->of_node, "num-slots",
3780 &pdata->num_slots)) {
3781 dev_info(dev, "num-slots property not found, "
3782 "assuming 1 slot is available\n");
3783 pdata->num_slots = 1;
/* Map optional quirk properties (see of_quirks table). */
3787 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3788 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3789 pdata->quirks |= of_quirks[idx].id;
3792 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3793 dev_info(dev, "fifo-depth property not found, using "
3794 "value of FIFOTH register as default\n");
3796 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3798 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3799 pdata->bus_hz = clock_frequency;
/* Delegate SoC-specific properties to the variant driver. */
3801 if (drv_data && drv_data->parse_dt) {
3802 ret = drv_data->parse_dt(host);
3804 return ERR_PTR(ret);
3807 if (of_find_property(np, "keep-power-in-suspend", NULL))
3808 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3810 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3811 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3813 if (of_find_property(np, "supports-highspeed", NULL))
3814 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3816 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3817 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3819 if (of_find_property(np, "supports-DDR_MODE", NULL))
3820 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3822 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3823 pdata->caps2 |= MMC_CAP2_HS200;
3825 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3826 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3828 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3829 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3831 if (of_get_property(np, "cd-inverted", NULL))
3832 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3833 if (of_get_property(np, "bootpart-no-access", NULL))
3834 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3839 #else /* CONFIG_OF */
/* Non-OF build: there is no device tree to parse, so probing must fail. */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
3844 #endif /* CONFIG_OF */
/*
 * Probe a DesignWare MMC controller instance.
 *
 * Sequence: parse platform data (DT), determine the IP version to pick the
 * DATA register offset, acquire and enable the clocks (hpclk is optional,
 * hclk and clk_mmc are mandatory), program the clock rate, decode the host
 * data width from HCON, reset all controller blocks, set up DMA, program
 * FIFO thresholds, register the IRQ handler and workqueue, initialize the
 * slots, and finally unmask the interrupts the driver cares about.
 *
 * Returns 0 on success or a negative errno; on failure every resource
 * acquired so far is released via the goto unwind chain at the bottom.
 */
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;
	u32 regs;

	host->pdata = dw_mci_parse_dt(host);
	if (IS_ERR(host->pdata)) {
		dev_err(host->dev, "platform data not available\n");
		return -EINVAL;
	}

	/* with more than one slot a slot-select callback is mandatory */
	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
		dev_err(host->dev,
			"Platform data must supply select_slot function\n");
		return -ENODEV;
	}

	/*
	 * In 2.40a spec, Data offset is changed.
	 * Need to check the version-id and set data-offset for DATA register.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->data_offset = DATA_OFFSET;
	else
		host->data_offset = DATA_240A_OFFSET;

	/* hpclk is optional: log and carry on if it is missing */
	host->hpclk_mmc = devm_clk_get(host->dev, "hpclk_mmc");
	if (IS_ERR(host->hpclk_mmc))
		dev_err(host->dev, "failed to get hpclk_mmc\n");
	else
		clk_prepare_enable(host->hpclk_mmc);

	/* AHB bus clock - mandatory */
	host->hclk_mmc = devm_clk_get(host->dev, "hclk_mmc");
	if (IS_ERR(host->hclk_mmc)) {
		dev_err(host->dev, "failed to get hclk_mmc\n");
		ret = PTR_ERR(host->hclk_mmc);
		goto err_hclk_mmc;
	}
	clk_prepare_enable(host->hclk_mmc);

	/* CIU clock - mandatory */
	host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
	if (IS_ERR(host->clk_mmc)) {
		dev_err(host->dev, "failed to get clk mmc_per\n");
		ret = PTR_ERR(host->clk_mmc);
		goto err_clk_mmc;
	}

	host->bus_hz = host->pdata->bus_hz;
	if (!host->bus_hz) {
		dev_err(host->dev, "Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_mmc;
	}

	if (host->verid < DW_MMC_240A)
		ret = clk_set_rate(host->clk_mmc, host->bus_hz);
	else
		//rockchip: fix divider 2 in clksum before controlller
		ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
	if (ret)
		dev_err(host->dev, "failed to set clk mmc\n");
	clk_prepare_enable(host->clk_mmc);

	if (drv_data && drv_data->setup_clock) {
		ret = drv_data->setup_clock(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific clock setup failed\n");
			goto err_clk_mmc;
		}
	}

	host->quirks = host->pdata->quirks;
	host->irq_state = true;
	host->set_speed = 0;
	host->svi_flags = 0;

	spin_lock_init(&host->lock);
	spin_lock_init(&host->slock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!dw_mci_ctrl_all_reset(host))
		return -ENODEV;

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
	 *                          Tx Mark = fifo_size / 2 DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	host->card_workqueue = alloc_workqueue("dw-mci-card",
			WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
	if (!host->card_workqueue) {
		ret = -ENOMEM;
		goto err_dmaunmap;
	}
	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_workqueue;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;

	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	/*
	 * Enable interrupts for command done, data over, data empty, card det,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
	       SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
	/* card-detect interrupt only for removable (non-SDIO, non-eMMC) cards */
	if (!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
	    && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
		regs |= SDMMC_INT_CD;
	mci_writel(host, INTMASK, regs);

	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */

	dev_info(host->dev, "DW MMC controller at irq %d, "
		 "%d bit host data width, "
		 "%u deep fifo\n",
		 host->irq, width, fifo_size);

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev, "attempted to initialize %d slots, "
			"but failed on all\n", host->num_slots);
		goto err_workqueue;
	}

	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");

	return 0;

/* error unwind: release resources in reverse order of acquisition */
err_workqueue:
	destroy_workqueue(host->card_workqueue);

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);
	if (host->vmmc) {
		regulator_disable(host->vmmc);
		regulator_put(host->vmmc);
	}

err_clk_mmc:
	if (!IS_ERR(host->clk_mmc))
		clk_disable_unprepare(host->clk_mmc);
err_hclk_mmc:
	if (!IS_ERR(host->hclk_mmc))
		clk_disable_unprepare(host->hclk_mmc);
	return ret;
}
4090 EXPORT_SYMBOL(dw_mci_probe);
4092 void dw_mci_remove(struct dw_mci *host)
4094 struct mmc_host *mmc = host->mmc;
4095 struct dw_mci_slot *slot = mmc_priv(mmc);
4098 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4099 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4101 for(i = 0; i < host->num_slots; i++){
4102 dev_dbg(host->dev, "remove slot %d\n", i);
4104 dw_mci_cleanup_slot(host->slot[i], i);
4107 /* disable clock to CIU */
4108 mci_writel(host, CLKENA, 0);
4109 mci_writel(host, CLKSRC, 0);
4111 destroy_workqueue(host->card_workqueue);
4112 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4113 unregister_pm_notifier(&host->mmc->pm_notify);
4115 if (host->use_dma && host->dma_ops->exit)
4116 host->dma_ops->exit(host);
4118 if (gpio_is_valid(slot->cd_gpio))
4119 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4122 regulator_disable(host->vmmc);
4123 regulator_put(host->vmmc);
4125 if (!IS_ERR(host->clk_mmc))
4126 clk_disable_unprepare(host->clk_mmc);
4128 if (!IS_ERR(host->hclk_mmc))
4129 clk_disable_unprepare(host->hclk_mmc);
4130 if (!IS_ERR(host->hpclk_mmc))
4131 clk_disable_unprepare(host->hpclk_mmc);
4133 EXPORT_SYMBOL(dw_mci_remove);
4137 #ifdef CONFIG_PM_SLEEP
4139 * TODO: we should probably disable the clock to the card in the suspend path.
4141 extern int get_wifi_chip_type(void);
/*
 * Suspend the controller.
 *
 * For SDIO hosts driving certain Wi-Fi chips (ESP8089 or anything newer
 * than the AP6XXX series) the controller is kept powered across suspend,
 * so nothing is done.  Otherwise the supply regulator is dropped and, for
 * the removable-SD controller only, the IRQ is disabled, the pins are put
 * into their idle state, the controller interrupts are masked, and the
 * card-detect GPIO is armed as a wakeup source (except on rk3126/rk3036,
 * which are already in gpio_cd mode).
 *
 * Always returns 0.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
	    (get_wifi_chip_type() == WIFI_ESP8089 ||
	     get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
		return 0;

	if (host->vmmc)
		regulator_disable(host->vmmc);

	/*only for sdmmc controller*/
	if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
		disable_irq(host->irq);
		if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
			MMC_DBG_ERR_FUNC(host->mmc,
				"Idle pinctrl setting failed! [%s]",
				mmc_hostname(host->mmc));

		/* mask and disable all controller interrupts */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);
		mci_writel(host, INTMASK, 0x00);
		mci_writel(host, CTRL, 0x00);

		/* Soc rk3126/3036 already in gpio_cd mode */
		if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
			/* switch CD pin to GPIO so it can wake the system */
			dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
			enable_irq_wake(host->mmc->slot.cd_irq);
		}
	}
	return 0;
}
4170 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * Resume the controller.
 *
 * Mirrors dw_mci_suspend(): skipped entirely for the keep-powered SDIO
 * Wi-Fi configurations and for SDIO slots with no card present.  For the
 * removable-SD controller it disarms the card-detect wakeup, restores the
 * default pinctrl state and clears the SoC's force_jtag muxing via GRF.
 * It then re-enables the supply regulator, resets all controller blocks,
 * re-initializes DMA, restores FIFOTH/TMOUT, re-programs the interrupt
 * mask, and re-runs set_ios/setup_bus for slots with MMC_PM_KEEP_POWER.
 *
 * Returns 0 on success or a negative errno (regulator/reset failure).
 */
int dw_mci_resume(struct dw_mci *host)
{
	int i, ret, retry_cnt = 0;
	u32 regs;
	struct dw_mci_slot *slot;

	if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
	    (get_wifi_chip_type() == WIFI_ESP8089 ||
	     get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
		return 0;

	/* SDIO slot with no card present: nothing to restore */
	if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
		slot = mmc_priv(host->mmc);
		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
			return 0;
	}

	/*only for sdmmc controller*/
	if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
		/* Soc rk3126/3036 already in gpio_cd mode */
		if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
			disable_irq_wake(host->mmc->slot.cd_irq);
			mmc_gpio_free_cd(host->mmc);
		}

		if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
			MMC_DBG_ERR_FUNC(host->mmc,
				"Default pinctrl setting failed! [%s]",
				mmc_hostname(host->mmc));

		/* clear force_jtag muxing in the SoC's GRF register */
		if (cpu_is_rk3288())
			grf_writel(((1 << 12) << 16) | (0 << 12),
				   RK3288_GRF_SOC_CON0);
		else if (cpu_is_rk3036())
			grf_writel(((1 << 11) << 16) | (0 << 11),
				   RK3036_GRF_SOC_CON0);
		else if (cpu_is_rk312x())
			/* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
			grf_writel(((1 << 8) << 16) | (0 << 8),
				   RK3036_GRF_SOC_CON0);
		else if (host->cid == DW_MCI_TYPE_RK3368)
			regmap_write(host->grf, 0x43c,
				     ((1 << 13) << 16) | (0 << 13));
	}

	if (host->vmmc) {
		ret = regulator_enable(host->vmmc);
		if (ret) {
			dev_err(host->dev,
				"failed to enable regulator: %d\n", ret);
			return ret;
		}
	}

	if (!dw_mci_ctrl_all_reset(host)) {
		ret = -ENODEV;
		return ret;
	}

	if (!(cpu_is_rk3036() || cpu_is_rk312x()))
		if (host->use_dma && host->dma_ops->init)
			host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
	       DW_MCI_ERROR_FLAGS;
	if (!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
		regs |= SDMMC_INT_CD;
	mci_writel(host, INTMASK, regs);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	/*only for sdmmc controller*/
	if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) && (!retry_cnt))
		enable_irq(host->irq);

	/* re-apply ios/bus setup for slots that kept power across suspend */
	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}
	return 0;
}
4263 EXPORT_SYMBOL(dw_mci_resume);
4264 #endif /* CONFIG_PM_SLEEP */
/* Module load: nothing to register here; hosts are probed via the
 * platform drivers that embed this core. */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

/* Module unload: no global state to tear down. */
static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);
4279 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4280 MODULE_AUTHOR("NXP Semiconductor VietNam");
4281 MODULE_AUTHOR("Imagination Technologies Ltd");
4282 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4283 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4284 MODULE_LICENSE("GPL v2");