2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
50 #include "rk_sdmmc_dbg.h"
51 #include <linux/regulator/rockchip_io_vol_domain.h>
52 #include "../../clk/rockchip/clk-ops.h"
54 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
56 /* Common flag combinations */
57 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
58 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
60 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
62 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
63 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
64 #define DW_MCI_SEND_STATUS 1
65 #define DW_MCI_RECV_STATUS 2
66 #define DW_MCI_DMA_THRESHOLD 16
68 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
69 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
71 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
72 #define SDMMC_DATA_TIMEOUT_SD 500
73 #define SDMMC_DATA_TIMEOUT_SDIO 250
74 #define SDMMC_DATA_TIMEOUT_EMMC 2500
76 #define SDMMC_CMD_RTO_MAX_HOLD 200
77 #define SDMMC_WAIT_FOR_UNBUSY 2500
79 #ifdef CONFIG_MMC_DW_IDMAC
80 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
81 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
82 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
86 u32 des0; /* Control Descriptor */
87 #define IDMAC_DES0_DIC BIT(1)
88 #define IDMAC_DES0_LD BIT(2)
89 #define IDMAC_DES0_FD BIT(3)
90 #define IDMAC_DES0_CH BIT(4)
91 #define IDMAC_DES0_ER BIT(5)
92 #define IDMAC_DES0_CES BIT(30)
93 #define IDMAC_DES0_OWN BIT(31)
95 u32 des1; /* Buffer sizes */
96 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
97 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
99 u32 des2; /* buffer 1 physical address */
101 u32 des3; /* buffer 2 physical address */
103 #endif /* CONFIG_MMC_DW_IDMAC */
105 static const u8 tuning_blk_pattern_4bit[] = {
106 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
107 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
108 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
109 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
110 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
111 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
112 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
113 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
116 static const u8 tuning_blk_pattern_8bit[] = {
117 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
118 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
119 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
120 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
121 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
122 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
123 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
124 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
125 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
126 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
127 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
128 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
129 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
130 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
131 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
132 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
135 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
136 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
137 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
138 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
140 /* Debug helper: print every register of the current host via printk. */
142 static int dw_mci_regs_printk(struct dw_mci *host)
144 	struct sdmmc_reg *regs = dw_mci_regs;
	/* Walk the name/address table until the NULL-name sentinel entry. */
146 	while( regs->name != 0 ){
147 		printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
150 	printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
155 #if defined(CONFIG_DEBUG_FS)
/*
 * Dump the in-flight mmc_request (cmd/data/stop) for a slot to debugfs.
 * NOTE(review): cmd->resp[2] is printed twice below; the last argument
 * presumably should be cmd->resp[3] — same for the stop command dump.
 */
156 static int dw_mci_req_show(struct seq_file *s, void *v)
158 	struct dw_mci_slot *slot = s->private;
159 	struct mmc_request *mrq;
160 	struct mmc_command *cmd;
161 	struct mmc_command *stop;
162 	struct mmc_data *data;
164 	/* Make sure we get a consistent snapshot */
165 	spin_lock_bh(&slot->host->lock);
175 	"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
176 	cmd->opcode, cmd->arg, cmd->flags,
177 	cmd->resp[0], cmd->resp[1], cmd->resp[2],
178 	cmd->resp[2], cmd->error);
180 	seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
181 	data->bytes_xfered, data->blocks,
182 	data->blksz, data->flags, data->error);
185 	"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
186 	stop->opcode, stop->arg, stop->flags,
187 	stop->resp[0], stop->resp[1], stop->resp[2],
188 	stop->resp[2], stop->error);
191 	spin_unlock_bh(&slot->host->lock);
/* debugfs open hook for the "req" file. */
196 static int dw_mci_req_open(struct inode *inode, struct file *file)
198 	return single_open(file, dw_mci_req_show, inode->i_private);
201 static const struct file_operations dw_mci_req_fops = {
202 	.owner		= THIS_MODULE,
203 	.open		= dw_mci_req_open,
206 	.release	= single_release,
/*
 * Dump controller register values to debugfs.
 * NOTE(review): these seq_printf calls pass the SDMMC_* register offset
 * macros directly rather than mci_readl() reads — confirm whether live
 * register contents were intended here.
 */
209 static int dw_mci_regs_show(struct seq_file *s, void *v)
211 	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
212 	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
213 	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
214 	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
215 	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
216 	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open hook for the "regs" file. */
221 static int dw_mci_regs_open(struct inode *inode, struct file *file)
223 	return single_open(file, dw_mci_regs_show, inode->i_private);
226 static const struct file_operations dw_mci_regs_fops = {
227 	.owner		= THIS_MODULE,
228 	.open		= dw_mci_regs_open,
/* Create the per-slot debugfs nodes under the mmc host's debugfs root. */
231 	.release	= single_release,
234 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
236 	struct mmc_host *mmc = slot->mmc;
237 	struct dw_mci *host = slot->host;
241 	root = mmc->debugfs_root;
245 	node = debugfs_create_file("regs", S_IRUSR, root, host,
250 	node = debugfs_create_file("req", S_IRUSR, root, slot,
255 	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
259 	node = debugfs_create_x32("pending_events", S_IRUSR, root,
260 	(u32 *)&host->pending_events);
264 	node = debugfs_create_x32("completed_events", S_IRUSR, root,
265 	(u32 *)&host->completed_events);
272 	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
274 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the hardware data/response timeout register to its maximum value;
 * finer-grained timeouts are enforced in software elsewhere in this driver. */
276 static void dw_mci_set_timeout(struct dw_mci *host)
278 	/* timeout (maximum) */
279 	mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command's opcode and flags into the SDMMC CMD register
 * bit-field value (cmdr).  Also lets the platform drv_data hook adjust the
 * final command word.  Returns the prepared CMD register value.
 */
282 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
284 	struct mmc_data *data;
285 	struct dw_mci_slot *slot = mmc_priv(mmc);
286 	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	/* Mark the command in-flight; the ISR/tasklet overwrite this on completion. */
288 	cmd->error = -EINPROGRESS;
292 	if (cmdr == MMC_STOP_TRANSMISSION)
293 		cmdr |= SDMMC_CMD_STOP;
295 		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
297 	if (cmd->flags & MMC_RSP_PRESENT) {
298 		/* We expect a response, so set this bit */
299 		cmdr |= SDMMC_CMD_RESP_EXP;
300 		if (cmd->flags & MMC_RSP_136)
301 			cmdr |= SDMMC_CMD_RESP_LONG;
304 	if (cmd->flags & MMC_RSP_CRC)
305 		cmdr |= SDMMC_CMD_RESP_CRC;
	/* Data phase: set transfer-mode bits from the data flags. */
309 		cmdr |= SDMMC_CMD_DAT_EXP;
310 		if (data->flags & MMC_DATA_STREAM)
311 			cmdr |= SDMMC_CMD_STRM_MODE;
312 		if (data->flags & MMC_DATA_WRITE)
313 			cmdr |= SDMMC_CMD_DAT_WR;
	/* Give the SoC-specific driver a chance to tweak the command word. */
316 	if (drv_data && drv_data->prepare_command)
317 		drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build a synthetic stop/abort command in host->stop_abort for the given
 * data command: CMD12 (STOP_TRANSMISSION) for block reads/writes, or an
 * SDIO CCCR abort write (CMD52) for CMD53 transfers.  Returns the SDMMC
 * CMD register value used to issue it.
 */
323 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
325 	struct mmc_command *stop;
331 	stop = &host->stop_abort;
333 	memset(stop, 0, sizeof(struct mmc_command));
335 	if (cmdr == MMC_READ_SINGLE_BLOCK ||
336 	cmdr == MMC_READ_MULTIPLE_BLOCK ||
337 	cmdr == MMC_WRITE_BLOCK ||
338 	cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
339 	stop->opcode = MMC_STOP_TRANSMISSION;
341 	stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
342 	} else if (cmdr == SD_IO_RW_EXTENDED) {
	/* CMD52 write of 1 to the CCCR abort register, function number taken
	 * from bits [30:28] of the original CMD53 argument. */
343 	stop->opcode = SD_IO_RW_DIRECT;
344 	stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
345 	((cmd->arg >> 28) & 0x7);
346 	stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
351 	cmdr = stop->opcode | SDMMC_CMD_STOP |
352 	SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Write the argument and command word to the controller and kick off the
 * command (SDMMC_CMD_START).  CMD11 (SD_SWITCH_VOLTAGE) gets special
 * handling: low-power clock gating is disabled first and the VOLT_SWITCH
 * bit is set so the controller sequences the 1.8V switch.
 */
357 static void dw_mci_start_command(struct dw_mci *host,
358 	struct mmc_command *cmd, u32 cmd_flags)
360 	struct dw_mci_slot *slot = host->slot[0];
361 	/* temporarily fixed to slot[0]: host->num_slots is always 1 here */
	/* Remember the previous command (used elsewhere for error reporting). */
363 	host->pre_cmd = host->cmd;
366 	"start command: ARGR=0x%08x CMDR=0x%08x\n",
367 	cmd->arg, cmd_flags);
369 	if(SD_SWITCH_VOLTAGE == cmd->opcode){
370 	/* confirm non-low-power mode */
371 	mci_writel(host, CMDARG, 0);
372 	dw_mci_disable_low_power(slot);
374 	MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
375 	__LINE__, __FUNCTION__,mmc_hostname(host->mmc));
377 	cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
380 	mci_writel(host, CMDARG, cmd->arg);
383 	/* must be fixed to 1 on some SoCs, for example RK3188 */
384 	if(host->mmc->hold_reg_flag)
385 	cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
387 	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the request's own STOP command using the CMD word pre-computed
 * in host->stop_cmdr. */
391 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
393 	dw_mci_start_command(host, data->stop, host->stop_cmdr);
396 /* DMA interface functions */
/* Abort any in-progress DMA and mark the transfer phase complete so the
 * state-machine tasklet can move on. */
397 static void dw_mci_stop_dma(struct dw_mci *host)
399 	if (host->using_dma) {
400 	/* Fixme: No need to terminate edma, may cause flush op */
401 	if(!(cpu_is_rk3036() || cpu_is_rk312x()))
402 	host->dma_ops->stop(host);
403 	host->dma_ops->cleanup(host);
406 	/* Data transfer was stopped by the interrupt handler */
407 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Map the mmc_data direction flag to the dma_data_direction used for
 * dma_map_sg()/dma_unmap_sg(). */
410 static int dw_mci_get_dma_dir(struct mmc_data *data)
412 	if (data->flags & MMC_DATA_WRITE)
413 	return DMA_TO_DEVICE;
415 	return DMA_FROM_DEVICE;
418 #ifdef CONFIG_MMC_DW_IDMAC
/* Unmap the data scatterlist unless it was pre-mapped by pre_req()
 * (host_cookie != 0 means the mapping is owned by post_req()). */
419 static void dw_mci_dma_cleanup(struct dw_mci *host)
421 	struct mmc_data *data = host->data;
424 	if (!data->host_cookie)
425 	dma_unmap_sg(host->dev,
428 	dw_mci_get_dma_dir(data));
/* Soft-reset the internal DMA controller via the BMOD register. */
431 static void dw_mci_idmac_reset(struct dw_mci *host)
433 	u32 bmod = mci_readl(host, BMOD);
434 	/* Software reset of DMA */
435 	bmod |= SDMMC_IDMAC_SWRESET;
436 	mci_writel(host, BMOD, bmod);
/* Disable the IDMAC interface in CTRL, then stop and soft-reset the
 * IDMAC engine via BMOD. */
439 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
443 	/* Disable and reset the IDMAC interface */
444 	temp = mci_readl(host, CTRL);
445 	temp &= ~SDMMC_CTRL_USE_IDMAC;
446 	temp |= SDMMC_CTRL_DMA_RESET;
447 	mci_writel(host, CTRL, temp);
449 	/* Stop the IDMAC running */
450 	temp = mci_readl(host, BMOD);
451 	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
452 	temp |= SDMMC_IDMAC_SWRESET;
453 	mci_writel(host, BMOD, temp);
/* IDMAC completion callback: clean up the mapping and signal
 * EVENT_XFER_COMPLETE to the state-machine tasklet. */
456 static void dw_mci_idmac_complete_dma(void *arg)
458 	struct dw_mci *host = arg;
459 	struct mmc_data *data = host->data;
461 	dev_vdbg(host->dev, "DMA complete\n");
464 	MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
465 	host->mrq->cmd->opcode,host->mrq->cmd->arg,
466 	data->blocks,data->blksz,mmc_hostname(host->mmc));
469 	host->dma_ops->cleanup(host);
472 	* If the card was removed, data will be NULL. No point in trying to
473 	* send the stop command or waiting for NBUSY in this case.
476 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
477 	tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the DMA-mapped scatterlist:
 * one chained descriptor per sg entry, then mark the first (FD) and
 * last (LD, interrupts enabled) descriptors.
 */
481 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
485 	struct idmac_desc *desc = host->sg_cpu;
487 	for (i = 0; i < sg_len; i++, desc++) {
488 	unsigned int length = sg_dma_len(&data->sg[i]);
489 	u32 mem_addr = sg_dma_address(&data->sg[i]);
491 	/* Set the OWN bit and disable interrupts for this descriptor */
492 	desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
495 	IDMAC_SET_BUFFER1_SIZE(desc, length);
497 	/* Physical address to DMA to/from */
498 	desc->des2 = mem_addr;
501 	/* Set first descriptor */
503 	desc->des0 |= IDMAC_DES0_FD;
505 	/* Set last descriptor */
	/* NOTE(review): byte-offset arithmetic — assumes host->sg_cpu is a
	 * void * (GNU byte-scaled arithmetic); confirm against struct dw_mci. */
506 	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
507 	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
508 	desc->des0 |= IDMAC_DES0_LD;
/* Build the descriptor list for this transfer, select and enable the
 * IDMAC, and poke the poll-demand register to start it. */
513 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
517 	dw_mci_translate_sglist(host, host->data, sg_len);
519 	/* Select IDMAC interface */
520 	temp = mci_readl(host, CTRL);
521 	temp |= SDMMC_CTRL_USE_IDMAC;
522 	mci_writel(host, CTRL, temp);
526 	/* Enable the IDMAC */
527 	temp = mci_readl(host, BMOD);
528 	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
529 	mci_writel(host, BMOD, temp);
531 	/* Start it running */
532 	mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: size the descriptor ring to one page, forward-link
 * the descriptors into a ring, reset the engine, unmask only the normal/
 * receive interrupts, and program the descriptor base address.
 */
535 static int dw_mci_idmac_init(struct dw_mci *host)
537 	struct idmac_desc *p;
540 	/* Number of descriptors in the ring buffer */
541 	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
543 	/* Forward link the descriptor list */
544 	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
545 	p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
547 	/* Set the last descriptor as the end-of-ring descriptor */
548 	p->des3 = host->sg_dma;
549 	p->des0 = IDMAC_DES0_ER;
551 	dw_mci_idmac_reset(host);
553 	/* Mask out interrupts - get Tx & Rx complete only */
554 	mci_writel(host, IDSTS, IDMAC_INT_CLR);
555 	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
558 	/* Set the descriptor base address */
559 	mci_writel(host, DBADDR, host->sg_dma);
/* DMA ops vtable for the controller's internal DMA (IDMAC). */
563 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
564 	.init = dw_mci_idmac_init,
565 	.start = dw_mci_idmac_start_dma,
566 	.stop = dw_mci_idmac_stop_dma,
567 	.complete = dw_mci_idmac_complete_dma,
568 	.cleanup = dw_mci_dma_cleanup,
/* External-DMA counterpart of dw_mci_dma_cleanup(): unmap the sg list
 * unless pre_req() owns the mapping (host_cookie != 0). */
572 static void dw_mci_edma_cleanup(struct dw_mci *host)
574 	struct mmc_data *data = host->data;
577 	if (!data->host_cookie)
578 	dma_unmap_sg(host->dev,
579 	data->sg, data->sg_len,
580 	dw_mci_get_dma_dir(data));
/* Abort all outstanding transfers on the external dmaengine channel. */
583 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
585 	dmaengine_terminate_all(host->dms->ch);
/* External-DMA completion callback: sync the sg list back to the CPU for
 * reads, clean up, and signal EVENT_XFER_COMPLETE to the tasklet. */
588 static void dw_mci_edmac_complete_dma(void *arg)
590 	struct dw_mci *host = arg;
591 	struct mmc_data *data = host->data;
593 	dev_vdbg(host->dev, "DMA complete\n");
596 	if(data->flags & MMC_DATA_READ)
597 	/* Invalidate cache after read */
598 	dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
599 	data->sg_len, DMA_FROM_DEVICE);
601 	host->dma_ops->cleanup(host);
604 	* If the card was removed, data will be NULL. No point in trying to
605 	* send the stop command or waiting for NBUSY in this case.
608 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
609 	tasklet_schedule(&host->tasklet);
/*
 * Start a transfer on the external dmaengine channel: configure slave
 * burst/width from the controller's FIFOTH MSIZE, prepare a slave sg
 * descriptor in the proper direction, install the completion callback,
 * and issue it.  Writes also flush the CPU cache before the transfer.
 */
613 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
615 	struct dma_slave_config slave_config;
616 	struct dma_async_tx_descriptor *desc = NULL;
617 	struct scatterlist *sgl = host->data->sg;
618 	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
619 	u32 sg_elems = host->data->sg_len;
620 	u32 fifoth_val, mburst;
624 	/* Set external dma config: burst size, burst width*/
625 	slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
626 	slave_config.src_addr = slave_config.dst_addr;
627 	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
628 	slave_config.src_addr_width = slave_config.dst_addr_width;
630 	/* Match FIFO dma burst MSIZE with external dma config*/
631 	fifoth_val = mci_readl(host, FIFOTH);
632 	mburst = mszs[(fifoth_val >> 28) & 0x7];
634 	/* edmac limit burst to 16, but work around for rk3036 to 8 */
635 	if (unlikely(cpu_is_rk3036()))
640 	slave_config.dst_maxburst = (mburst > burst_limit) ? burst_limit : mburst;
641 	slave_config.src_maxburst = slave_config.dst_maxburst;
643 	if(host->data->flags & MMC_DATA_WRITE){
644 	slave_config.direction = DMA_MEM_TO_DEV;
645 	ret = dmaengine_slave_config(host->dms->ch, &slave_config);
647 	dev_err(host->dev, "error in dw_mci edma configuration.\n");
651 	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
652 	DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
654 	dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
657 	/* Set dw_mci_edmac_complete_dma as callback */
658 	desc->callback = dw_mci_edmac_complete_dma;
659 	desc->callback_param = (void *)host;
660 	dmaengine_submit(desc);
662 	/* Flush cache before write */
663 	dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
664 	sg_elems, DMA_TO_DEVICE);
665 	dma_async_issue_pending(host->dms->ch);
	/* Read path: same sequence with DEV_TO_MEM direction. */
668 	slave_config.direction = DMA_DEV_TO_MEM;
669 	ret = dmaengine_slave_config(host->dms->ch, &slave_config);
671 	dev_err(host->dev, "error in dw_mci edma configuration.\n");
674 	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
675 	DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
677 	dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
680 	/* set dw_mci_edmac_complete_dma as callback */
681 	desc->callback = dw_mci_edmac_complete_dma;
682 	desc->callback_param = (void *)host;
683 	dmaengine_submit(desc);
684 	dma_async_issue_pending(host->dms->ch);
/*
 * Allocate the dw_mci_dma_slave wrapper and request the "dw_mci" slave
 * channel declared in the device tree.
 * NOTE(review): the failure path dereferences host->dms->ch->chan_id
 * right after !host->dms->ch was established — that print would deref
 * NULL; confirm and fix in a full build.
 */
688 static int dw_mci_edmac_init(struct dw_mci *host)
690 	/* Request external dma channel, SHOULD decide chn in dts */
692 	host->dms = (struct dw_mci_dma_slave *)kmalloc
693 	(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
694 	if (NULL == host->dms) {
695 	dev_err(host->dev, "No enough memory to alloc dms.\n");
699 	host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
700 	if (!host->dms->ch) {
701 	dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
702 	host->dms->ch->chan_id);
709 	if (NULL != host->dms) {
/* Release the external DMA channel (if held) and clear the pointer. */
717 static void dw_mci_edmac_exit(struct dw_mci *host)
719 	if (NULL != host->dms) {
720 	if (NULL != host->dms->ch) {
721 	dma_release_channel(host->dms->ch);
722 	host->dms->ch = NULL;
/* DMA ops vtable for the external dmaengine-based DMA path. */
729 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
730 	.init = dw_mci_edmac_init,
731 	.exit = dw_mci_edmac_exit,
732 	.start = dw_mci_edmac_start_dma,
733 	.stop = dw_mci_edmac_stop_dma,
734 	.complete = dw_mci_edmac_complete_dma,
735 	.cleanup = dw_mci_edma_cleanup,
737 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * DMA-map the data's scatterlist (or reuse a mapping cached in
 * host_cookie by pre_req).  Rejects transfers that are too short or not
 * word-aligned, which fall back to PIO.  Returns the mapped sg count.
 */
739 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
740 	struct mmc_data *data,
743 	struct scatterlist *sg;
744 	unsigned int i, sg_len;
	/* Reuse the mapping made by dw_mci_pre_req(), if any. */
746 	if (!next && data->host_cookie)
747 	return data->host_cookie;
750 	* We don't do DMA on "complex" transfers, i.e. with
751 	* non-word-aligned buffers or lengths. Also, we don't bother
752 	* with all the DMA setup overhead for short transfers.
754 	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
760 	for_each_sg(data->sg, sg, data->sg_len, i) {
761 	if (sg->offset & 3 || sg->length & 3)
765 	sg_len = dma_map_sg(host->dev,
768 	dw_mci_get_dma_dir(data));
773 	data->host_cookie = sg_len;
/* mmc_host_ops.pre_req: pre-map the next request's data for DMA so the
 * mapping cost overlaps with the current transfer. */
778 static void dw_mci_pre_req(struct mmc_host *mmc,
779 	struct mmc_request *mrq,
782 	struct dw_mci_slot *slot = mmc_priv(mmc);
783 	struct mmc_data *data = mrq->data;
785 	if (!slot->host->use_dma || !data)
	/* A stale cookie would confuse ownership tracking; clear it first. */
788 	if (data->host_cookie) {
789 	data->host_cookie = 0;
793 	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
794 	data->host_cookie = 0;
/* mmc_host_ops.post_req: unmap a scatterlist that pre_req() mapped
 * (host_cookie != 0) and clear the cookie. */
797 static void dw_mci_post_req(struct mmc_host *mmc,
798 	struct mmc_request *mrq,
801 	struct dw_mci_slot *slot = mmc_priv(mmc);
802 	struct mmc_data *data = mrq->data;
804 	if (!slot->host->use_dma || !data)
807 	if (data->host_cookie)
808 	dma_unmap_sg(slot->host->dev,
811 	dw_mci_get_dma_dir(data));
812 	data->host_cookie = 0;
/*
 * Choose the largest DMA multiple-transaction size (MSIZE) and RX/TX
 * watermarks compatible with the current block size and FIFO depth, and
 * program them into FIFOTH.  Only meaningful for the IDMAC path.
 */
815 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
817 #ifdef CONFIG_MMC_DW_IDMAC
818 	unsigned int blksz = data->blksz;
819 	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
820 	u32 fifo_width = 1 << host->data_shift;
821 	u32 blksz_depth = blksz / fifo_width, fifoth_val;
822 	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
823 	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
825 	tx_wmark = (host->fifo_depth) / 2;
826 	tx_wmark_invers = host->fifo_depth - tx_wmark;
830 	* if blksz is not a multiple of the FIFO width
832 	if (blksz % fifo_width) {
	/* Scan from the largest MSIZE down for one that divides both. */
839 	if (!((blksz_depth % mszs[idx]) ||
840 	(tx_wmark_invers % mszs[idx]))) {
842 	rx_wmark = mszs[idx] - 1;
847 	* If idx is '0', it won't be tried
848 	* Thus, initial values are uesed
851 	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
852 	mci_writel(host, FIFOTH, fifoth_val);
/*
 * Program the card read threshold (CDTHRCTL) for high-speed read timings
 * (HS200 / SDR104 only); disabled for other timings or when the block
 * does not fit the FIFO.
 */
857 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
859 	unsigned int blksz = data->blksz;
860 	u32 blksz_depth, fifo_depth;
863 	WARN_ON(!(data->flags & MMC_DATA_READ));
865 	if (host->timing != MMC_TIMING_MMC_HS200 &&
866 	host->timing != MMC_TIMING_UHS_SDR104)
869 	blksz_depth = blksz / (1 << host->data_shift);
870 	fifo_depth = host->fifo_depth;
872 	if (blksz_depth > fifo_depth)
876 	* If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
877 	* If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
878 	* Currently just choose blksz.
881 	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	/* Fallthrough/disable path: threshold off. */
885 	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to set up the data transfer via DMA: map the sg list, retune
 * FIFOTH if the block size changed, enable the DMA interface, mask the
 * PIO (RX/TX drain) interrupts, and start the DMA engine.  Returns
 * non-zero when DMA is not possible and the caller must fall back to PIO.
 */
888 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
896 	/* If we don't have a channel, we can't do DMA */
900 	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
902 	/* Fixme: No need terminate edma, may cause flush op */
903 	if(!(cpu_is_rk3036() || cpu_is_rk312x()))
904 	host->dma_ops->stop(host);
911 	"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
912 	(unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
916 	* Decide the MSIZE and RX/TX Watermark.
917 	* If current block size is same with previous size,
918 	* no need to update fifoth.
920 	if (host->prev_blksz != data->blksz)
921 	dw_mci_adjust_fifoth(host, data);
924 	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); /* FIFO reset here is dangerous -- noted by xbw, 2014-03-28 */
926 	/* Enable the DMA interface */
927 	temp = mci_readl(host, CTRL);
928 	temp |= SDMMC_CTRL_DMA_ENABLE;
929 	mci_writel(host, CTRL, temp);
931 	/* Disable RX/TX IRQs, let DMA handle it */
932 	spin_lock_irqsave(&host->slock, flags);
933 	temp = mci_readl(host, INTMASK);
934 	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
935 	mci_writel(host, INTMASK, temp);
936 	spin_unlock_irqrestore(&host->slock, flags);
938 	host->dma_ops->start(host, sg_len);
/*
 * Set up a data transfer, preferring DMA.  On DMA fallback, configure
 * PIO: start the sg_miter, unmask RX/TX drain interrupts, disable the
 * DMA interface, and restore the initial FIFOTH.  prev_blksz tracks
 * whether the next DMA transfer needs a FIFOTH update.
 */
943 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
948 	data->error = -EINPROGRESS;
950 	//WARN_ON(host->data);
955 	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); /* FIFO reset here is dangerous -- noted by xbw, 2014-03-28 */
957 	if (data->flags & MMC_DATA_READ) {
958 	host->dir_status = DW_MCI_RECV_STATUS;
959 	dw_mci_ctrl_rd_thld(host, data);
961 	host->dir_status = DW_MCI_SEND_STATUS;
964 	MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
965 	data->blocks, data->blksz, mmc_hostname(host->mmc));
	/* Non-zero return => DMA unusable; fall back to interrupt-driven PIO. */
967 	if (dw_mci_submit_data_dma(host, data)) {
968 	int flags = SG_MITER_ATOMIC;
969 	if (host->data->flags & MMC_DATA_READ)
970 	flags |= SG_MITER_TO_SG;
972 	flags |= SG_MITER_FROM_SG;
974 	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
976 	host->part_buf_start = 0;
977 	host->part_buf_count = 0;
979 	spin_lock_irqsave(&host->slock, flag);
980 	mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
981 	temp = mci_readl(host, INTMASK);
982 	temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
983 	mci_writel(host, INTMASK, temp);
984 	spin_unlock_irqrestore(&host->slock, flag);
986 	temp = mci_readl(host, CTRL);
987 	temp &= ~SDMMC_CTRL_DMA_ENABLE;
988 	mci_writel(host, CTRL, temp);
991 	* Use the initial fifoth_val for PIO mode.
992 	* If next issued data may be transfered by DMA mode,
993 	* prev_blksz should be invalidated.
995 	mci_writel(host, FIFOTH, host->fifoth_val);
996 	host->prev_blksz = 0;
999 	* Keep the current block size.
1000 	* It will be used to decide whether to update
1001 	* fifoth register next time.
1003 	host->prev_blksz = data->blksz;
/*
 * Synchronously issue a "register" command (e.g. clock update) to the
 * controller: optionally wait for the card/controller to go un-busy,
 * write CMDARG/CMD with SDMMC_CMD_START, then poll until the START bit
 * clears or a timeout elapses.
 */
1007 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1009 	struct dw_mci *host = slot->host;
1010 	unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1011 	unsigned int cmd_status = 0;
1012 #ifdef SDMMC_WAIT_FOR_UNBUSY
1014 	timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
	/* Only wait for un-busy while a card is actually present. */
1016 	if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1018 	ret = time_before(jiffies, timeout);
1019 	cmd_status = mci_readl(host, STATUS);
1020 	if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1024 	printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1025 	__LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1028 	mci_writel(host, CMDARG, arg);
1030 	mci_writel(host, CMD, SDMMC_CMD_START | cmd);
	/* Clock-update commands complete quickly; use a shorter poll window. */
1031 	if(cmd & SDMMC_CMD_UPD_CLK)
1032 	timeout = jiffies + msecs_to_jiffies(50);
1034 	timeout = jiffies + msecs_to_jiffies(500);
1035 	while (time_before(jiffies, timeout)) {
1036 	cmd_status = mci_readl(host, CMD);
1037 	if (!(cmd_status & SDMMC_CMD_START))
1040 	dev_err(&slot->mmc->class_dev,
1041 	"Timeout sending command (cmd %#x arg %#x status %#x)\n",
1042 	cmd, arg, cmd_status);
/*
 * Configure the card clock (divider, CLKENA low-power gating, and — on
 * Rockchip — the clk_mmc source rate) and the bus width for the slot.
 * Includes several Rockchip-specific workarounds for div==0/div==1 cases
 * and for the initialization (<=400kHz) clock.
 */
1045 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1047 	struct dw_mci *host = slot->host;
1048 	unsigned int tempck,clock = slot->clock;
1053 	MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1054 	__LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
	/* clock == 0: gate the card clock and latch the change in the controller. */
1057 	mci_writel(host, CLKENA, 0);
1058 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1059 	if(host->svi_flags == 0)
1060 	mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1062 	mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1064 	} else if (clock != host->current_speed || force_clkinit) {
1065 	div = host->bus_hz / clock;
1066 	if (host->bus_hz % clock && host->bus_hz > clock)
1068 	* move the + 1 after the divide to prevent
1069 	* over-clocking the card.
1073 	div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
	/* Only reprogram if the effective clock actually changed. */
1075 	if ((clock << div) != slot->__clk_old || force_clkinit) {
1076 	tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1077 	dev_info(&slot->mmc->class_dev,
1078 	"Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1079 	slot->id, host->bus_hz, clock,
1082 	host->set_speed = tempck;
1083 	host->set_div = div;
	/* disable clock before touching the divider */
1087 	mci_writel(host, CLKENA, 0);
1088 	mci_writel(host, CLKSRC, 0);
1092 	SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	/* Init phase (<=400kHz): run clk_mmc at 2x so the internal /2 lands right. */
1094 	if(clock <= 400*1000){
1095 	MMC_DBG_BOOT_FUNC(host->mmc,
1096 	"dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1097 	clock * 2, mmc_hostname(host->mmc));
1098 	/* clk_mmc will change parents to 24MHz xtal*/
1099 	clk_set_rate(host->clk_mmc, clock * 2);
1102 	host->set_div = div;
1106 	MMC_DBG_BOOT_FUNC(host->mmc,
1107 	"dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1108 	mmc_hostname(host->mmc));
1111 	MMC_DBG_ERR_FUNC(host->mmc,
1112 	"dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1113 	mmc_hostname(host->mmc));
1115 	host->set_div = div;
1116 	host->bus_hz = host->set_speed * 2;
1117 	MMC_DBG_BOOT_FUNC(host->mmc,
1118 	"dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1119 	div, host->bus_hz, mmc_hostname(host->mmc));
1121 	/* BUG may be here, come on, Linux BSP engineer looks!
1122 	FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1123 	WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1124 	some oops happened like that:
1125 	mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1126 	rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1127 	rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1128 	mmc0: new high speed DDR MMC card at address 0001
1129 	mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1131 	mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1132 	mmcblk0: retrying using single block read
1133 	mmcblk0: error -110 sending status command, retrying
1135 	We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1138 	(host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1139 	!(host->mmc->caps2 & MMC_CAP2_HS200)) {
1142 	host->set_div = div;
1143 	host->bus_hz = host->set_speed * 2;
1144 	MMC_DBG_BOOT_FUNC(host->mmc,
1145 	"dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1146 	div, host->bus_hz, mmc_hostname(host->mmc));
	/* Controllers older than 2.40a need the rate itself; newer take 2x. */
1149 	if (host->verid < DW_MMC_240A)
1150 	clk_set_rate(host->clk_mmc,(host->bus_hz));
1152 	clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1158 	/* set clock to desired speed */
1159 	mci_writel(host, CLKDIV, div);
1163 	SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1165 	/* enable clock; only low power if no SDIO */
1166 	clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1168 	if (host->verid < DW_MMC_240A)
1169 	sdio_int = SDMMC_INT_SDIO(slot->id);
1171 	sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1173 	if (!(mci_readl(host, INTMASK) & sdio_int))
1174 	clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1175 	mci_writel(host, CLKENA, clk_en_a);
1179 	SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1180 	/* keep the clock with reflecting clock dividor */
1181 	slot->__clk_old = clock << div;
1184 	host->current_speed = clock;
1186 	if(slot->ctype != slot->pre_ctype)
1187 	MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1189 	(slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1190 	mmc_hostname(host->mmc));
1191 	slot->pre_ctype = slot->ctype;
1193 	/* Set the current slot bus width */
1194 	mci_writel(host, CTYPE, (slot->ctype << slot->id));
1197 extern struct mmc_card *this_card;
/*
 * Poll STATUS until DATA_BUSY/MC_BUSY clear, with a timeout chosen per
 * card type (SDIO default, SD, eMMC; erase commands get an EXT_CSD-based
 * timeout).
 * NOTE(review): "(host->cmd->arg & (0x1 << 31)) == 1" can never be true
 * — the masked value is 0 or 0x80000000; the secure-erase branch is
 * effectively dead.  Presumably "!= 0" was intended; confirm.
 */
1198 static void dw_mci_wait_unbusy(struct dw_mci *host)
1201 	unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1202 	unsigned long time_loop;
1203 	unsigned int status;
1206 	MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1208 	if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1209 	if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1210 	/* Special care for (secure)erase timeout calculation */
1212 	if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1215 	if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1216 	se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1217 	300000 * (this_card->ext_csd.sec_erase_mult)) :
1218 	(timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1222 	if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1223 	timeout = SDMMC_DATA_TIMEOUT_EMMC;
1224 	} else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1225 	timeout = SDMMC_DATA_TIMEOUT_SD;
1228 	time_loop = jiffies + msecs_to_jiffies(timeout);
1230 	status = mci_readl(host, STATUS);
1231 	if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1233 	} while (time_before(jiffies, time_loop));
1238 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/*
 * mmc_host_ops.card_busy used during the signal-voltage switch:
 * toggles host->svi_flags between the two phases of the switch and
 * returns it.  Per the original comment:
1241 	*  0--status is busy.
1242 	*  1--status is unbusy.
 */
1244 int dw_mci_card_busy(struct mmc_host *mmc)
1246 	struct dw_mci_slot *slot = mmc_priv(mmc);
1247 	struct dw_mci *host = slot->host;
1249 	MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1250 	host->svi_flags, mmc_hostname(host->mmc));
1253 	if(host->svi_flags == 0){
1255 	host->svi_flags = 1;
1256 	return host->svi_flags;
1259 	host->svi_flags = 0;
1260 	return host->svi_flags;
/*
 * Program the controller for one command (and its data phase, if any)
 * on the given slot: select the slot, wait for the previous transfer
 * to drain, reset per-request event bookkeeping, program BYTCNT/BLKSIZ,
 * submit the data descriptors and finally issue the command.
 * Caller must hold host->lock.
 */
1266 static void __dw_mci_start_request(struct dw_mci *host,
1267 struct dw_mci_slot *slot,
1268 struct mmc_command *cmd)
1270 struct mmc_request *mrq;
1271 struct mmc_data *data;
1275 if (host->pdata->select_slot)
1276 host->pdata->select_slot(slot->id);
1278 host->cur_slot = slot;
/* Data path must be idle before BYTCNT/BLKSIZ are reprogrammed. */
1281 dw_mci_wait_unbusy(host);
1283 host->pending_events = 0;
1284 host->completed_events = 0;
1285 host->data_status = 0;
1289 dw_mci_set_timeout(host);
1290 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1291 mci_writel(host, BLKSIZ, data->blksz);
1294 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1296 /* this is the first command, send the initialization clock */
1297 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1298 cmdflags |= SDMMC_CMD_INIT;
1301 dw_mci_submit_data(host, data);
1305 dw_mci_start_command(host, cmd, cmdflags);
/* Pre-compute the stop command flags so the ISR can issue it fast. */
1308 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * Start slot->mrq: if the request carries CMD23 (set block count, mrq->sbc)
 * issue that first, otherwise start with the request's main command.
 * Caller must hold host->lock.
 */
1311 static void dw_mci_start_request(struct dw_mci *host,
1312 struct dw_mci_slot *slot)
1314 struct mmc_request *mrq = slot->mrq;
1315 struct mmc_command *cmd;
1317 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1318 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1320 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1321 __dw_mci_start_request(host, slot, cmd);
/*
 * Queue (or immediately start) a request for a slot: start it right
 * away when the host state machine is idle, otherwise append the slot
 * to host->queue for dw_mci_request_end() to pick up later.
 */
1324 /* must be called with host->lock held */
1325 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1326 struct mmc_request *mrq)
1328 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1333 if (host->state == STATE_IDLE) {
1334 host->state = STATE_SENDING_CMD;
1335 dw_mci_start_request(host, slot);
1337 list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops.request entry point.  The card-presence check and the
 * queueing of the request are done under host->lock so a surprise
 * removal cannot race submission; an absent card fails the request
 * immediately with -ENOMEDIUM.
 */
1341 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1343 struct dw_mci_slot *slot = mmc_priv(mmc);
1344 struct dw_mci *host = slot->host;
1349 * The check for card presence and queueing of the request must be
1350 * atomic, otherwise the card could be removed in between and the
1351 * request wouldn't fail until another card was inserted.
1353 spin_lock_bh(&host->lock);
1355 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1356 spin_unlock_bh(&host->lock);
1357 mrq->cmd->error = -ENOMEDIUM;
1358 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1359 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
1361 mmc_request_done(mmc, mrq);
1365 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1366 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1368 dw_mci_queue_request(host, slot, mrq);
1370 spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.set_ios: apply bus settings requested by the mmc core.
 * Optionally waits for the controller to go un-busy (SDMMC_WAIT_FOR_UNBUSY),
 * then programs bus width, DDR mode (UHS_REG), clock (via the
 * per-SoC drv_data hook and dw_mci_setup_bus) and slot power.
 */
1373 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1375 struct dw_mci_slot *slot = mmc_priv(mmc);
1376 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1377 struct dw_mci *host = slot->host;
1379 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1382 #ifdef SDMMC_WAIT_FOR_UNBUSY
1383 unsigned long time_loop;
1386 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1387 if(host->svi_flags == 1)
1388 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD)
1390 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1392 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1395 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1396 printk("%d..%s: no card. [%s]\n", \
1397 __LINE__, __FUNCTION__, mmc_hostname(mmc));
1402 ret = time_before(jiffies, time_loop);
1403 regs = mci_readl(slot->host, STATUS);
1404 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1410 printk("slot->flags = %lu ", slot->flags);
1411 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1412 if(host->svi_flags != 1)
1415 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1416 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Record the bus width; it is written to CTYPE in dw_mci_setup_bus(). */
1420 switch (ios->bus_width) {
1421 case MMC_BUS_WIDTH_4:
1422 slot->ctype = SDMMC_CTYPE_4BIT;
1424 case MMC_BUS_WIDTH_8:
1425 slot->ctype = SDMMC_CTYPE_8BIT;
1428 /* set default 1 bit mode */
1429 slot->ctype = SDMMC_CTYPE_1BIT;
1430 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode is per-slot: bit (16 + slot->id) of UHS_REG. */
1433 regs = mci_readl(slot->host, UHS_REG);
1436 if (ios->timing == MMC_TIMING_UHS_DDR50)
1437 regs |= ((0x1 << slot->id) << 16);
1439 regs &= ~((0x1 << slot->id) << 16);
1441 mci_writel(slot->host, UHS_REG, regs);
1442 slot->host->timing = ios->timing;
1445 * Use mirror of ios->clock to prevent race with mmc
1446 * core ios update when finding the minimum.
1448 slot->clock = ios->clock;
1450 if (drv_data && drv_data->set_ios)
1451 drv_data->set_ios(slot->host, ios);
1453 /* Slot specific timing and width adjustment */
1454 dw_mci_setup_bus(slot, false);
/* Slot power control mirrors ios->power_mode into the PWREN register. */
1458 switch (ios->power_mode) {
1460 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1462 if (slot->host->pdata->setpower)
1463 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1464 regs = mci_readl(slot->host, PWREN);
1465 regs |= (1 << slot->id);
1466 mci_writel(slot->host, PWREN, regs);
1469 /* Power down slot */
1470 if(slot->host->pdata->setpower)
1471 slot->host->pdata->setpower(slot->id, 0);
1472 regs = mci_readl(slot->host, PWREN);
1473 regs &= ~(1 << slot->id);
1474 mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops.get_ro: report write-protect state.  Precedence:
 * slot quirk forces writable, then the platform get_ro hook, then a
 * write-protect GPIO, and finally the controller's WRTPRT register.
 */
1481 static int dw_mci_get_ro(struct mmc_host *mmc)
1484 struct dw_mci_slot *slot = mmc_priv(mmc);
1485 struct dw_mci_board *brd = slot->host->pdata;
1487 /* Use platform get_ro function, else try on board write protect */
1488 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1490 else if(brd->get_ro)
1491 read_only = brd->get_ro(slot->id);
1492 else if(gpio_is_valid(slot->wp_gpio))
1493 read_only = gpio_get_value(slot->wp_gpio);
1496 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1498 dev_dbg(&mmc->class_dev, "card is %s\n",
1499 read_only ? "read-only" : "read-write");
/*
 * Out-of-band SDIO presence control (e.g. WiFi module power on/off).
 * Updates DW_MMC_CARD_PRESENT under host->lock, gates the controller
 * AHB and card clocks to match, then schedules a detect pass so the
 * mmc core rescans the bus.  Only valid for SDIO-restricted hosts.
 */
1504 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1506 struct dw_mci_slot *slot = mmc_priv(mmc);
1507 struct dw_mci *host = slot->host;
1508 /*struct dw_mci_board *brd = slot->host->pdata;*/
1510 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1513 spin_lock_bh(&host->lock);
1516 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1518 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1520 spin_unlock_bh(&host->lock);
/* Enable clocks while a card is present, release them when it is gone. */
1522 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1523 if(__clk_is_enabled(host->hclk_mmc) == false)
1524 clk_prepare_enable(host->hclk_mmc);
1525 if(__clk_is_enabled(host->clk_mmc) == false)
1526 clk_prepare_enable(host->clk_mmc);
1528 if(__clk_is_enabled(host->clk_mmc) == true)
1529 clk_disable_unprepare(slot->host->clk_mmc);
1530 if(__clk_is_enabled(host->hclk_mmc) == true)
1531 clk_disable_unprepare(slot->host->hclk_mmc);
1534 mmc_detect_change(slot->mmc, 20);
/*
 * mmc_host_ops.get_cd: report card presence.  On rk3036/rk3126 SD
 * slots the CD GPIO doubles as a JTAG mux control (GRF force_jtag bit)
 * for NCD packages, so presence is debounced by reading the GPIO
 * twice, the CD IRQ polarity is flipped for the next edge and the
 * JTAG mux is switched accordingly.  Other hosts fall back to the
 * platform hook, a CD GPIO, or the controller's CDETECT register.
 */
1540 static int dw_mci_get_cd(struct mmc_host *mmc)
1543 struct dw_mci_slot *slot = mmc_priv(mmc);
1544 struct dw_mci_board *brd = slot->host->pdata;
1545 struct dw_mci *host = slot->host;
1546 int gpio_cd = mmc_gpio_get_cd(mmc);
1547 int force_jtag_bit, force_jtag_reg;
1551 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
1552 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1553 gpio_cd = slot->cd_gpio;
1554 irq = gpio_to_irq(gpio_cd);
1555 if (gpio_is_valid(gpio_cd)) {
1556 gpio_val = gpio_get_value(gpio_cd);
1557 if (soc_is_rk3036()) {
1558 force_jtag_bit = 11;
1559 force_jtag_reg = RK312X_GRF_SOC_CON0;
1560 } else if (soc_is_rk3126() || soc_is_rk3126b()) {
1561 force_jtag_reg = RK312X_GRF_SOC_CON0;
/* Stable reading (same value twice): CD GPIO is active-low. */
1565 if (gpio_val == gpio_get_value(gpio_cd)) {
1566 gpio_cd = gpio_get_value(gpio_cd) == 0 ? 1 : 0;
1568 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1569 /* Enable force_jtag without card in slot, ONLY for NCD-package */
1570 grf_writel((0x1 << (force_jtag_bit + 16)) | (1 << force_jtag_bit),
1573 dw_mci_ctrl_all_reset(host);
1575 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
1576 /* Really card detected: SHOULD disable force_jtag */
1577 grf_writel((0x1 << (force_jtag_bit + 16)) | (0 << force_jtag_bit),
1582 gpio_val = gpio_get_value(gpio_cd);
1584 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
1585 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1586 return slot->last_detect_state;
1589 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
1593 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1594 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1596 /* Use platform get_cd function, else try onboard card detect */
1597 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1599 else if (brd->get_cd)
1600 present = !brd->get_cd(slot->id);
1601 else if (!IS_ERR_VALUE(gpio_cd))
1604 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
/* Publish the result to slot->flags under the host lock. */
1607 spin_lock_bh(&host->lock);
1609 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1610 dev_dbg(&mmc->class_dev, "card is present\n");
1612 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1613 dev_dbg(&mmc->class_dev, "card is not present\n");
1615 spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.hw_reset for eMMC: (1) abort any transfer with CMD12,
 * (2) wait for the data-over interrupt, (3) reset IDMA, DMA and FIFO
 * in that order, then (4) pulse RST_n/PWREN with eMMC-spec timing
 * (tRstW >= 1us, tRSCA >= 200us, tRSTH >= 1us).
 */
1622 * Dts Should caps emmc controller with poll-hw-reset
1624 static void dw_mci_hw_reset(struct mmc_host *mmc)
1626 struct dw_mci_slot *slot = mmc_priv(mmc);
1627 struct dw_mci *host = slot->host;
1632 unsigned long timeout;
1635 /* (1) CMD12 to end any transfer in process */
1636 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1637 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1639 if(host->mmc->hold_reg_flag)
1640 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1641 mci_writel(host, CMDARG, 0);
1643 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll up to 500ms for the controller to accept the command. */
1645 timeout = jiffies + msecs_to_jiffies(500);
1647 ret = time_before(jiffies, timeout);
1648 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1653 MMC_DBG_ERR_FUNC(host->mmc,
1654 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1655 __func__, mmc_hostname(host->mmc));
1657 /* (2) wait DTO, even if no response is sent back by card */
1659 timeout = jiffies + msecs_to_jiffies(5);
1661 ret = time_before(jiffies, timeout);
1662 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1663 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1669 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1671 /* Software reset - BMOD[0] for IDMA only */
1672 regs = mci_readl(host, BMOD);
1673 regs |= SDMMC_IDMAC_SWRESET;
1674 mci_writel(host, BMOD, regs);
1675 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1676 regs = mci_readl(host, BMOD);
1677 if(regs & SDMMC_IDMAC_SWRESET)
1678 MMC_DBG_WARN_FUNC(host->mmc,
1679 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1680 __func__, mmc_hostname(host->mmc));
1682 /* DMA reset - CTRL[2] */
1683 regs = mci_readl(host, CTRL);
1684 regs |= SDMMC_CTRL_DMA_RESET;
1685 mci_writel(host, CTRL, regs);
1686 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1687 regs = mci_readl(host, CTRL);
1688 if(regs & SDMMC_CTRL_DMA_RESET)
1689 MMC_DBG_WARN_FUNC(host->mmc,
1690 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1691 __func__, mmc_hostname(host->mmc));
1693 /* FIFO reset - CTRL[1] */
1694 regs = mci_readl(host, CTRL);
1695 regs |= SDMMC_CTRL_FIFO_RESET;
1696 mci_writel(host, CTRL, regs);
1697 mdelay(1); /* no timing limited, 1ms is random value */
1698 regs = mci_readl(host, CTRL);
/* Fix: this warning previously named SDMMC_CTRL_DMA_RESET (copy-paste)
 * although it checks the FIFO reset bit; report the correct register. */
1699 if(regs & SDMMC_CTRL_FIFO_RESET)
1700 MMC_DBG_WARN_FUNC(host->mmc,
1701 "%s dw_mci_hw_reset: SDMMC_CTRL_FIFO_RESET failed!!! [%s]\n",
1702 __func__, mmc_hostname(host->mmc));
1705 According to eMMC spec
1706 tRstW >= 1us ; RST_n pulse width
1707 tRSCA >= 200us ; RST_n to Command time
1708 tRSTH >= 1us ; RST_n high period
1710 mci_writel(slot->host, PWREN, 0x0);
1711 mci_writel(slot->host, RST_N, 0x0);
1713 udelay(10); /* 10us for bad quality eMMc. */
1715 mci_writel(slot->host, PWREN, 0x1);
1716 mci_writel(slot->host, RST_N, 0x1);
1718 usleep_range(500, 1000); /* at least 500(> 200us) */
1722 * Disable low power mode.
1724 * Low power mode will stop the card clock when idle. According to the
1725 * description of the CLKENA register we should disable low power mode
1726 * for SDIO cards if we need SDIO interrupts to work.
1728 * This function is fast if low power mode is already disabled.
1730 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1732 struct dw_mci *host = slot->host;
1734 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1736 clk_en_a = mci_readl(host, CLKENA);
/* Clear the per-slot low-power bit and latch it with a clock-update
 * command, as required after any CLKENA change. */
1738 if (clk_en_a & clken_low_pwr) {
1739 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1740 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1741 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * mmc_host_ops.enable_sdio_irq: mask/unmask the slot's SDIO interrupt
 * in INTMASK under host->slock.  The SDIO interrupt bit position moved
 * up by 8 in controller version 2.40a and later.
 */
1745 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1747 struct dw_mci_slot *slot = mmc_priv(mmc);
1748 struct dw_mci *host = slot->host;
1749 unsigned long flags;
1753 spin_lock_irqsave(&host->slock, flags);
1755 /* Enable/disable Slot Specific SDIO interrupt */
1756 int_mask = mci_readl(host, INTMASK);
1758 if (host->verid < DW_MMC_240A)
1759 sdio_int = SDMMC_INT_SDIO(slot->id);
1761 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1765 * Turn off low power mode if it was enabled. This is a bit of
1766 * a heavy operation and we disable / enable IRQs a lot, so
1767 * we'll leave low power mode disabled and it will get
1768 * re-enabled again in dw_mci_setup_bus().
1770 dw_mci_disable_low_power(slot);
1772 mci_writel(host, INTMASK,
1773 (int_mask | sdio_int));
1775 mci_writel(host, INTMASK,
1776 (int_mask & ~sdio_int));
1779 spin_unlock_irqrestore(&host->slock, flags);
1782 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1784 IO_DOMAIN_12 = 1200,
1785 IO_DOMAIN_18 = 1800,
1786 IO_DOMAIN_33 = 3300,
/*
 * Switch the SoC's GRF IO-domain voltage select for the SD pins to
 * match the signalling voltage.  NOTE(review): the voltage-validation
 * branches' conditions are in elided lines; only rk3288 has a visible
 * programming path here (GRF_IO_VSEL bit 7, write-enable bit 23) --
 * other SoCs log "unknown chip".
 */
1788 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1798 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1799 __FUNCTION__, mmc_hostname(host->mmc));
1802 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1803 __FUNCTION__, mmc_hostname(host->mmc));
1807 if(cpu_is_rk3288()){
1808 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1809 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1813 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1814 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * Perform the actual signalling-voltage switch: set the external
 * vmmc regulator, switch the GRF IO domain, program UHS_REG's
 * VOLT_REG_18 bit and verify it sticks after the regulator settling
 * time (5 ms).  Only meaningful on controller >= 2.40a.
 */
1818 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1819 struct mmc_ios *ios)
1822 unsigned int value,uhs_reg;
1825 * Signal Voltage Switching is only applicable for Host Controllers
1828 if (host->verid < DW_MMC_240A)
1831 uhs_reg = mci_readl(host, UHS_REG);
1832 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1833 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1835 switch (ios->signal_voltage) {
1836 case MMC_SIGNAL_VOLTAGE_330:
1837 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1839 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1840 /* regulator_put(host->vmmc); //to be done in remove function. */
1842 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1843 __func__, regulator_get_voltage(host->vmmc), ret);
1845 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1846 " failed\n", mmc_hostname(host->mmc));
1849 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1851 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1852 __FUNCTION__, mmc_hostname(host->mmc));
1854 /* set High-power mode */
1855 value = mci_readl(host, CLKENA);
1856 value &= ~SDMMC_CLKEN_LOW_PWR;
1857 mci_writel(host,CLKENA , value);
/* Clear the 1.8V enable bit for 3.3V signalling. */
1859 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1860 mci_writel(host,UHS_REG , uhs_reg);
1863 usleep_range(5000, 5500);
1865 /* 3.3V regulator output should be stable within 5 ms */
1866 uhs_reg = mci_readl(host, UHS_REG);
1867 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1870 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1871 mmc_hostname(host->mmc));
1874 case MMC_SIGNAL_VOLTAGE_180:
1876 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1877 /* regulator_put(host->vmmc);//to be done in remove function. */
1879 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1880 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1882 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1883 " failed\n", mmc_hostname(host->mmc));
1886 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1890 * Enable 1.8V Signal Enable in the Host Control2
1893 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1896 usleep_range(5000, 5500);
1897 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1898 __FUNCTION__,mmc_hostname(host->mmc));
1900 /* 1.8V regulator output should be stable within 5 ms */
1901 uhs_reg = mci_readl(host, UHS_REG);
1902 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1905 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1906 mmc_hostname(host->mmc));
1909 case MMC_SIGNAL_VOLTAGE_120:
1911 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1913 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1914 " failed\n", mmc_hostname(host->mmc));
1920 /* No signal voltage switch required */
/*
 * mmc_host_ops.start_signal_voltage_switch: thin wrapper that rejects
 * pre-2.40a controllers and delegates to
 * dw_mci_do_start_signal_voltage_switch().
 */
1926 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1927 struct mmc_ios *ios)
1929 struct dw_mci_slot *slot = mmc_priv(mmc);
1930 struct dw_mci *host = slot->host;
1933 if (host->verid < DW_MMC_240A)
1936 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * mmc_host_ops.execute_tuning: choose the tuning block pattern for
 * CMD19/CMD21 based on bus width, pick the clock controller id by
 * card type (3 = eMMC, 1 = SDIO, 0 = SD), then delegate the actual
 * sample-phase sweep to the per-SoC drv_data->execute_tuning hook.
 */
1942 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1944 struct dw_mci_slot *slot = mmc_priv(mmc);
1945 struct dw_mci *host = slot->host;
1946 const struct dw_mci_drv_data *drv_data = host->drv_data;
1947 struct dw_mci_tuning_data tuning_data;
1950 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1951 if(cpu_is_rk3036() || cpu_is_rk312x())
1954 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1955 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1956 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1957 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1958 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1959 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1960 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1964 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1965 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1966 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1969 "Undefined command(%d) for tuning\n", opcode);
1974 /* Recommend sample phase and delayline
1975 Fixme: Mix-use these three controllers will cause
1978 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1979 tuning_data.con_id = 3;
1980 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1981 tuning_data.con_id = 1;
1983 tuning_data.con_id = 0;
1985 /* 0: driver, from host->devices
1986 1: sample, from devices->host
1988 tuning_data.tuning_type = 1;
1990 if (drv_data && drv_data->execute_tuning)
1991 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/*
 * mmc_host_ops.post_tmo (Rockchip extension): recovery hook invoked by
 * the core after a request timeout -- drops the stuck request and
 * forces the state machine back to idle so new requests can start.
 */
1996 static void dw_mci_post_tmo(struct mmc_host *mmc)
1998 struct dw_mci_slot *slot = mmc_priv(mmc);
1999 struct dw_mci *host = slot->host;
2000 host->cur_slot->mrq = NULL;
2002 host->state = STATE_IDLE;
/* Host operations table handed to the mmc core for every slot. */
2005 static const struct mmc_host_ops dw_mci_ops = {
2006 .request = dw_mci_request,
2007 .pre_req = dw_mci_pre_req,
2008 .post_req = dw_mci_post_req,
2009 .set_ios = dw_mci_set_ios,
2010 .get_ro = dw_mci_get_ro,
2011 .get_cd = dw_mci_get_cd,
2012 .set_sdio_status = dw_mci_set_sdio_status,
2013 .hw_reset = dw_mci_hw_reset,
2014 .enable_sdio_irq = dw_mci_enable_sdio_irq,
2015 .execute_tuning = dw_mci_execute_tuning,
2016 .post_tmo = dw_mci_post_tmo,
2017 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
2018 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
2019 .card_busy = dw_mci_card_busy,
/*
 * Enable or disable the controller's IRQ line, tracking the current
 * state in host->irq_state so repeated calls with the same flag do
 * not unbalance the enable_irq()/disable_irq() depth.
 */
2024 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
2026 unsigned long flags;
2031 local_irq_save(flags);
2032 if(host->irq_state != irqflag)
2034 host->irq_state = irqflag;
2037 enable_irq(host->irq);
2041 disable_irq(host->irq);
2044 local_irq_restore(flags);
/*
 * Finalize the data phase of a request: on writes (except the CMD19
 * bus-test pattern) translate latched data-status error bits into
 * data->error, then wait for the card to go un-busy before the next
 * command is issued.
 */
2048 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2049 __releases(&host->lock)
2050 __acquires(&host->lock)
2052 if(DW_MCI_SEND_STATUS == host->dir_status){
2054 if( MMC_BUS_TEST_W != host->cmd->opcode){
2055 if(host->data_status & SDMMC_INT_DCRC)
2056 host->data->error = -EILSEQ;
2057 else if(host->data_status & SDMMC_INT_EBE)
2058 host->data->error = -ETIMEDOUT;
2060 dw_mci_wait_unbusy(host);
2063 dw_mci_wait_unbusy(host);
/*
 * Complete the current request and start the next queued slot, if any.
 * Drops host->lock around mmc_request_done() (see __releases/
 * __acquires) because the completion callback may re-enter the driver.
 */
2068 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2069 __releases(&host->lock)
2070 __acquires(&host->lock)
2072 struct dw_mci_slot *slot;
2073 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2075 //WARN_ON(host->cmd || host->data);
2077 dw_mci_deal_data_end(host, mrq);
2080 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2081 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2083 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2084 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2086 host->cur_slot->mrq = NULL;
/* Pull the next waiting slot off the queue, or go idle. */
2088 if (!list_empty(&host->queue)) {
2089 slot = list_entry(host->queue.next,
2090 struct dw_mci_slot, queue_node);
2091 list_del(&slot->queue_node);
2092 dev_vdbg(host->dev, "list not empty: %s is next\n",
2093 mmc_hostname(slot->mmc));
2094 host->state = STATE_SENDING_CMD;
2095 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2096 dw_mci_start_request(host, slot);
2098 dev_vdbg(host->dev, "list empty\n");
2099 host->state = STATE_IDLE;
/* Notify the core outside the lock; the callback may resubmit. */
2102 spin_unlock(&host->lock);
2103 mmc_request_done(prev_mmc, mrq);
2104 spin_lock(&host->lock);
/*
 * Harvest a finished command: copy the (long or short) response from
 * the RESP registers -- note RESP0..3 are read in reverse for R2 --
 * and translate the latched cmd_status error bits (RTO/RCRC/RESP_ERR)
 * into cmd->error.  SDIO hosts get special handling of response
 * timeouts (retry accounting via host->cmd_rto, partly elided here).
 */
2107 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2109 u32 status = host->cmd_status;
2111 host->cmd_status = 0;
2113 /* Read the response from the card (up to 16 bytes) */
2114 if (cmd->flags & MMC_RSP_PRESENT) {
2115 if (cmd->flags & MMC_RSP_136) {
2116 cmd->resp[3] = mci_readl(host, RESP0);
2117 cmd->resp[2] = mci_readl(host, RESP1);
2118 cmd->resp[1] = mci_readl(host, RESP2);
2119 cmd->resp[0] = mci_readl(host, RESP3);
2121 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2122 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2124 cmd->resp[0] = mci_readl(host, RESP0);
2128 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2129 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
/* Map controller error interrupts onto Linux error codes. */
2133 if (status & SDMMC_INT_RTO)
2135 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2138 cmd->error = -ETIMEDOUT;
2139 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2140 cmd->error = -EILSEQ;
2141 }else if (status & SDMMC_INT_RESP_ERR){
2146 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2147 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2150 if(MMC_SEND_STATUS != cmd->opcode)
2151 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2152 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2153 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2157 /* newer ip versions need a delay between retries */
2158 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine driving each request to completion.
 * Runs under host->lock and loops over host->state until no further
 * transition happens, consuming events posted by the interrupt
 * handler via host->pending_events:
 *   SENDING_CMD -> (SENDING_DATA | SENDING_STOP | done)
 *   SENDING_DATA -> DATA_BUSY -> (SENDING_STOP | done)
 *   DATA_ERROR -> DATA_BUSY
 * Several opening/closing braces are elided from this listing; do not
 * infer nesting from indentation.
 */
2164 static void dw_mci_tasklet_func(unsigned long priv)
2166 struct dw_mci *host = (struct dw_mci *)priv;
2167 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2168 struct mmc_data *data;
2169 struct mmc_command *cmd;
2170 enum dw_mci_state state;
2171 enum dw_mci_state prev_state;
2172 u32 status, cmd_flags;
2173 unsigned long timeout = 0;
2176 spin_lock(&host->lock);
2178 state = host->state;
/* Command phase finished: record response, decide the next phase. */
2188 case STATE_SENDING_CMD:
2189 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2190 &host->pending_events))
2195 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2196 dw_mci_command_complete(host, cmd);
2197 if (cmd == host->mrq->sbc && !cmd->error) {
2198 prev_state = state = STATE_SENDING_CMD;
2199 __dw_mci_start_request(host, host->cur_slot,
2204 if (cmd->data && cmd->error) {
2205 dw_mci_stop_dma(host);
2208 send_stop_cmd(host, data);
2209 state = STATE_SENDING_STOP;
2212 /* host->data = NULL; */
2215 send_stop_abort(host, data);
2216 state = STATE_SENDING_STOP;
2219 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2222 if (!host->mrq->data || cmd->error) {
2223 dw_mci_request_end(host, host->mrq);
2227 prev_state = state = STATE_SENDING_DATA;
/* Data phase in flight: handle transfer errors, else wait for DTO. */
2230 case STATE_SENDING_DATA:
2231 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2232 dw_mci_stop_dma(host);
2235 send_stop_cmd(host, data);
2237 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2238 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2239 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
2241 mci_writel(host, CMDARG, 0);
2243 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2244 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2246 if(host->mmc->hold_reg_flag)
2247 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2249 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
2251 timeout = jiffies + msecs_to_jiffies(500);
2254 ret = time_before(jiffies, timeout);
2255 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2259 MMC_DBG_ERR_FUNC(host->mmc,
2260 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2261 __func__, mmc_hostname(host->mmc));
2264 send_stop_abort(host, data);
2266 state = STATE_DATA_ERROR;
2270 MMC_DBG_CMD_FUNC(host->mmc,
2271 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2272 prev_state,state, mmc_hostname(host->mmc));
2274 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2275 &host->pending_events))
2277 MMC_DBG_INFO_FUNC(host->mmc,
2278 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2279 prev_state,state,mmc_hostname(host->mmc));
2281 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2282 prev_state = state = STATE_DATA_BUSY;
/* Transfer done: classify any data error and finish or send stop. */
2285 case STATE_DATA_BUSY:
2286 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2287 &host->pending_events))
2290 dw_mci_deal_data_end(host, host->mrq);
2291 MMC_DBG_INFO_FUNC(host->mmc,
2292 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2293 prev_state,state,mmc_hostname(host->mmc));
2295 /* host->data = NULL; */
2296 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2297 status = host->data_status;
2299 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2300 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2301 MMC_DBG_ERR_FUNC(host->mmc,
2302 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2303 prev_state,state, status, mmc_hostname(host->mmc));
2305 if (status & SDMMC_INT_DRTO) {
2306 data->error = -ETIMEDOUT;
2307 } else if (status & SDMMC_INT_DCRC) {
2308 data->error = -EILSEQ;
2309 } else if (status & SDMMC_INT_EBE &&
2310 host->dir_status == DW_MCI_SEND_STATUS){
2312 * No data CRC status was returned.
2313 * The number of bytes transferred will
2314 * be exaggerated in PIO mode.
2316 data->bytes_xfered = 0;
2317 data->error = -ETIMEDOUT;
2326 * After an error, there may be data lingering
2327 * in the FIFO, so reset it - doing so
2328 * generates a block interrupt, hence setting
2329 * the scatter-gather pointer to NULL.
2331 dw_mci_fifo_reset(host);
2333 data->bytes_xfered = data->blocks * data->blksz;
2338 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2339 prev_state,state,mmc_hostname(host->mmc));
2340 dw_mci_request_end(host, host->mrq);
2343 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2344 prev_state,state,mmc_hostname(host->mmc));
2346 if (host->mrq->sbc && !data->error) {
2347 data->stop->error = 0;
2349 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2350 prev_state,state,mmc_hostname(host->mmc));
2352 dw_mci_request_end(host, host->mrq);
2356 prev_state = state = STATE_SENDING_STOP;
2358 send_stop_cmd(host, data);
2360 if (data->stop && !data->error) {
2361 /* stop command for open-ended transfer*/
2363 send_stop_abort(host, data);
2367 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2368 prev_state,state,mmc_hostname(host->mmc));
/* Stop command (CMD12) issued: wait for it, then finish the request. */
2370 case STATE_SENDING_STOP:
2371 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2374 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2375 prev_state, state, mmc_hostname(host->mmc));
2377 /* CMD error in data command */
2378 if (host->mrq->cmd->error && host->mrq->data) {
2379 dw_mci_fifo_reset(host);
2383 host->data = NULL; */
2385 dw_mci_command_complete(host, host->mrq->stop);
2387 if (host->mrq->stop)
2388 dw_mci_command_complete(host, host->mrq->stop);
2390 host->cmd_status = 0;
2393 dw_mci_request_end(host, host->mrq);
/* Data error recovery: wait out the transfer, then rejoin DATA_BUSY. */
2396 case STATE_DATA_ERROR:
2397 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2398 &host->pending_events))
2401 state = STATE_DATA_BUSY;
2404 } while (state != prev_state);
2406 host->state = state;
2408 spin_unlock(&host->lock);
/*
 * Stash the trailing (sub-FIFO-word) bytes of a push in part_buf so a
 * later call can complete the word; cnt must be smaller than the FIFO
 * word size (1 << host->data_shift).
 */
2412 /* push final bytes to part_buf, only use during push */
2413 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2415 memcpy((void *)&host->part_buf, buf, cnt);
2416 host->part_buf_count = cnt;
/*
 * Top up part_buf with bytes from buf, never exceeding one FIFO word
 * (1 << host->data_shift).  Returns the number of bytes consumed
 * (the elided return presumably yields cnt).
 */
2419 /* append bytes to part_buf, only use during push */
2420 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2422 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2423 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2424 host->part_buf_count += cnt;
/*
 * Drain up to cnt leftover bytes from part_buf into buf, advancing
 * part_buf_start/part_buf_count.  Returns the number of bytes copied
 * (return statement elided in this listing).
 */
2428 /* pull first bytes from part_buf, only use during pull */
2429 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2431 cnt = min(cnt, (int)host->part_buf_count);
2433 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2435 host->part_buf_count -= cnt;
2436 host->part_buf_start += cnt;
/*
 * Copy the first cnt bytes of a freshly read FIFO word to buf and
 * mark the remainder of the word as pending in part_buf for the next
 * pull.
 */
2441 /* pull final bytes from the part_buf, assuming it's just been filled */
2442 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2444 memcpy(buf, &host->part_buf, cnt);
2445 host->part_buf_start = cnt;
2446 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO write path for a 16-bit FIFO: complete any partial word left in
 * part_buf, stream aligned 16-bit words into the FIFO (bouncing
 * through a stack buffer when buf is misaligned and the arch cannot do
 * unaligned loads), and stash any trailing odd byte -- flushing it if
 * this chunk ends the transfer.
 */
2449 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2451 struct mmc_data *data = host->data;
2454 /* try and push anything in the part_buf */
2455 if (unlikely(host->part_buf_count)) {
2456 int len = dw_mci_push_part_bytes(host, buf, cnt);
2459 if (host->part_buf_count == 2) {
2460 mci_writew(host, DATA(host->data_offset),
2462 host->part_buf_count = 0;
2465 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2466 if (unlikely((unsigned long)buf & 0x1)) {
2468 u16 aligned_buf[64];
2469 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2470 int items = len >> 1;
2472 /* memcpy from input buffer into aligned buffer */
2473 memcpy(aligned_buf, buf, len);
2476 /* push data from aligned buffer into fifo */
2477 for (i = 0; i < items; ++i)
2478 mci_writew(host, DATA(host->data_offset),
2485 for (; cnt >= 2; cnt -= 2)
2486 mci_writew(host, DATA(host->data_offset), *pdata++);
2489 /* put anything remaining in the part_buf */
2491 dw_mci_set_part_bytes(host, buf, cnt);
2492 /* Push data if we have reached the expected data length */
2493 if ((data->bytes_xfered + init_cnt) ==
2494 (data->blksz * data->blocks))
2495 mci_writew(host, DATA(host->data_offset),
/*
 * PIO read path for a 16-bit FIFO: read aligned 16-bit words from the
 * FIFO (via a stack bounce buffer when buf is misaligned on arches
 * without unaligned access), then read one more word for a trailing
 * odd byte and park the leftover in part_buf.
 */
2500 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2502 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2503 if (unlikely((unsigned long)buf & 0x1)) {
2505 /* pull data from fifo into aligned buffer */
2506 u16 aligned_buf[64];
2507 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2508 int items = len >> 1;
2510 for (i = 0; i < items; ++i)
2511 aligned_buf[i] = mci_readw(host,
2512 DATA(host->data_offset));
2513 /* memcpy from aligned buffer into output buffer */
2514 memcpy(buf, aligned_buf, len);
2522 for (; cnt >= 2; cnt -= 2)
2523 *pdata++ = mci_readw(host, DATA(host->data_offset));
2527 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2528 dw_mci_pull_final_bytes(host, buf, cnt);
/* PIO push path for a 32-bit wide data FIFO; identical structure to
 * dw_mci_push_data16() but with 4-byte FIFO words: drain part_buf first,
 * stream aligned 32-bit words, bounce-buffer unaligned input, and park or
 * flush the 1-3 trailing bytes depending on whether the transfer's total
 * expected length has been reached. */
2532 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2534 struct mmc_data *data = host->data;
2537 /* try and push anything in the part_buf */
2538 if (unlikely(host->part_buf_count)) {
2539 int len = dw_mci_push_part_bytes(host, buf, cnt);
2542 if (host->part_buf_count == 4) {
2543 mci_writel(host, DATA(host->data_offset),
2545 host->part_buf_count = 0;
2548 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2549 if (unlikely((unsigned long)buf & 0x3)) {
2551 u32 aligned_buf[32];
/* cnt & -4 rounds down to a multiple of 4; cap at bounce-buffer size */
2552 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2553 int items = len >> 2;
2555 /* memcpy from input buffer into aligned buffer */
2556 memcpy(aligned_buf, buf, len);
2559 /* push data from aligned buffer into fifo */
2560 for (i = 0; i < items; ++i)
2561 mci_writel(host, DATA(host->data_offset),
2568 for (; cnt >= 4; cnt -= 4)
2569 mci_writel(host, DATA(host->data_offset), *pdata++);
2572 /* put anything remaining in the part_buf */
2574 dw_mci_set_part_bytes(host, buf, cnt);
2575 /* Push data if we have reached the expected data length */
2576 if ((data->bytes_xfered + init_cnt) ==
2577 (data->blksz * data->blocks))
2578 mci_writel(host, DATA(host->data_offset),
/* PIO pull path for a 32-bit wide data FIFO; mirrors dw_mci_pull_data16()
 * with 4-byte FIFO words.  Trailing 1-3 bytes are fetched as a full word
 * into part_buf32 and handed to dw_mci_pull_final_bytes(). */
2583 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2585 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2586 if (unlikely((unsigned long)buf & 0x3)) {
2588 /* pull data from fifo into aligned buffer */
2589 u32 aligned_buf[32];
2590 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2591 int items = len >> 2;
2593 for (i = 0; i < items; ++i)
2594 aligned_buf[i] = mci_readl(host,
2595 DATA(host->data_offset));
2596 /* memcpy from aligned buffer into output buffer */
2597 memcpy(buf, aligned_buf, len);
2605 for (; cnt >= 4; cnt -= 4)
2606 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* leftover 1-3 bytes: read one more FIFO word and stage it */
2610 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2611 dw_mci_pull_final_bytes(host, buf, cnt);
/* PIO push path for a 64-bit wide data FIFO; same scheme as the 16/32-bit
 * variants with 8-byte FIFO words: drain part_buf, stream aligned 64-bit
 * words (bouncing unaligned input), then park or flush the 1-7 trailing
 * bytes depending on whether the transfer is complete. */
2615 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2617 struct mmc_data *data = host->data;
2620 /* try and push anything in the part_buf */
2621 if (unlikely(host->part_buf_count)) {
2622 int len = dw_mci_push_part_bytes(host, buf, cnt);
2626 if (host->part_buf_count == 8) {
2627 mci_writeq(host, DATA(host->data_offset),
2629 host->part_buf_count = 0;
2632 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2633 if (unlikely((unsigned long)buf & 0x7)) {
2635 u64 aligned_buf[16];
/* cnt & -8 rounds down to a multiple of 8; cap at bounce-buffer size */
2636 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2637 int items = len >> 3;
2639 /* memcpy from input buffer into aligned buffer */
2640 memcpy(aligned_buf, buf, len);
2643 /* push data from aligned buffer into fifo */
2644 for (i = 0; i < items; ++i)
2645 mci_writeq(host, DATA(host->data_offset),
2652 for (; cnt >= 8; cnt -= 8)
2653 mci_writeq(host, DATA(host->data_offset), *pdata++);
2656 /* put anything remaining in the part_buf */
2658 dw_mci_set_part_bytes(host, buf, cnt);
2659 /* Push data if we have reached the expected data length */
2660 if ((data->bytes_xfered + init_cnt) ==
2661 (data->blksz * data->blocks))
2662 mci_writeq(host, DATA(host->data_offset),
/* PIO pull path for a 64-bit wide data FIFO; mirrors the 16/32-bit pull
 * routines with 8-byte FIFO words.  Trailing 1-7 bytes are fetched as a
 * full word into host->part_buf and split by dw_mci_pull_final_bytes(). */
2667 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2669 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2670 if (unlikely((unsigned long)buf & 0x7)) {
2672 /* pull data from fifo into aligned buffer */
2673 u64 aligned_buf[16];
2674 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2675 int items = len >> 3;
2677 for (i = 0; i < items; ++i)
2678 aligned_buf[i] = mci_readq(host,
2679 DATA(host->data_offset));
2680 /* memcpy from aligned buffer into output buffer */
2681 memcpy(buf, aligned_buf, len);
2689 for (; cnt >= 8; cnt -= 8)
2690 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* leftover 1-7 bytes: read one more FIFO word and stage it */
2694 host->part_buf = mci_readq(host, DATA(host->data_offset));
2695 dw_mci_pull_final_bytes(host, buf, cnt);
/* Front-end for the FIFO-width-specific pull routines: first satisfy the
 * request from bytes already staged in part_buf; only if that did not cover
 * the whole @cnt does it dispatch to host->pull_data (pull_data16/32/64,
 * chosen at probe time from the FIFO width) for the remainder. */
2699 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2703 /* get remaining partial bytes */
2704 len = dw_mci_pull_part_bytes(host, buf, cnt);
2705 if (unlikely(len == cnt))
2710 /* get the rest of the data */
2711 host->pull_data(host, buf, cnt);
/* PIO receive handler, called from the interrupt path on RXDR (and, when
 * @dto is set, on data-transfer-over to drain whatever is left in the
 * FIFO).  Walks the request's scatterlist via the sg_miter API, pulling
 * min(segment remainder, FIFO fill level) bytes per iteration and looping
 * while the controller keeps signalling RXDR. */
2714 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2716 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2718 unsigned int offset;
2719 struct mmc_data *data = host->data;
2720 int shift = host->data_shift;
2723 unsigned int remain, fcnt;
/* NOTE(review): bail-out guard when the host has no bus references;
 * presumably protects against a late IRQ after teardown - confirm. */
2725 if(!host->mmc->bus_refs){
2726 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2730 if (!sg_miter_next(sg_miter))
2733 host->sg = sg_miter->piter.sg;
2734 buf = sg_miter->addr;
2735 remain = sg_miter->length;
/* bytes available = FIFO fill level (in words) scaled to bytes, plus any
 * bytes already staged in part_buf */
2739 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2740 << shift) + host->part_buf_count;
2741 len = min(remain, fcnt);
2744 dw_mci_pull_data(host, (void *)(buf + offset), len);
2745 data->bytes_xfered += len;
2750 sg_miter->consumed = offset;
2751 status = mci_readl(host, MINTSTS);
2752 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2753 /* if the RXDR is ready read again */
2754 } while ((status & SDMMC_INT_RXDR) ||
2755 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2758 if (!sg_miter_next(sg_miter))
2760 sg_miter->consumed = 0;
2762 sg_miter_stop(sg_miter);
2766 sg_miter_stop(sg_miter);
/* sg exhausted: mark the transfer phase complete for the tasklet */
2770 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* PIO transmit handler, called from the interrupt path on TXDR.  Walks the
 * scatterlist via sg_miter, pushing min(segment remainder, FIFO free space)
 * bytes per iteration and looping while the controller keeps signalling
 * TXDR.  Mirrors dw_mci_read_data_pio() with the FIFO-space computation
 * inverted (fifo_depth minus fill level, minus bytes staged in part_buf). */
2773 static void dw_mci_write_data_pio(struct dw_mci *host)
2775 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2777 unsigned int offset;
2778 struct mmc_data *data = host->data;
2779 int shift = host->data_shift;
2782 unsigned int fifo_depth = host->fifo_depth;
2783 unsigned int remain, fcnt;
/* NOTE(review): same late-IRQ guard as the read path - confirm intent. */
2785 if(!host->mmc->bus_refs){
2786 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2791 if (!sg_miter_next(sg_miter))
2794 host->sg = sg_miter->piter.sg;
2795 buf = sg_miter->addr;
2796 remain = sg_miter->length;
/* free space = (depth - fill level) words scaled to bytes, minus bytes
 * already committed in part_buf */
2800 fcnt = ((fifo_depth -
2801 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2802 << shift) - host->part_buf_count;
2803 len = min(remain, fcnt);
2806 host->push_data(host, (void *)(buf + offset), len);
2807 data->bytes_xfered += len;
2812 sg_miter->consumed = offset;
2813 status = mci_readl(host, MINTSTS);
2814 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2815 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2818 if (!sg_miter_next(sg_miter))
2820 sg_miter->consumed = 0;
2822 sg_miter_stop(sg_miter);
2826 sg_miter_stop(sg_miter);
/* sg exhausted: mark the transfer phase complete for the tasklet */
2830 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Record command-phase interrupt status (first status wins if one is
 * already latched), flag command completion, and kick the state-machine
 * tasklet to process it outside hard-IRQ context. */
2833 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2835 if (!host->cmd_status)
2836 host->cmd_status = status;
2843 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2844 tasklet_schedule(&host->tasklet);
/* Top-level hard-IRQ handler.  Reads the masked interrupt status (MINTSTS),
 * acks each condition in RINTSTS, and dispatches: command/data errors to the
 * tasklet, data-over and RXDR/TXDR to the PIO read/write helpers, VSI (volt
 * switch) and CMD_DONE to dw_mci_cmd_interrupt(), card-detect to the card
 * workqueue, per-slot SDIO interrupts to the MMC core, and finally (for SoCs
 * using the internal IDMAC rather than external edma) the IDMAC status. */
2847 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2849 struct dw_mci *host = dev_id;
2850 u32 pending, sdio_int;
2853 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2856 * DTO fix - version 2.10a and below, and only if internal DMA
2859 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
/* STATUS[29:17] is the FIFO count; synthesize DATA_OVER when it drains */
2861 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2862 pending |= SDMMC_INT_DATA_OVER;
2866 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2867 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2868 host->cmd_status = pending;
2870 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2871 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2873 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2876 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2877 /* if there is an error report DATA_ERROR */
2878 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2879 host->data_status = pending;
2881 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2883 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2884 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2885 tasklet_schedule(&host->tasklet);
2888 if (pending & SDMMC_INT_DATA_OVER) {
2889 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2890 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2891 if (!host->data_status)
2892 host->data_status = pending;
/* on receive, drain whatever is left in the FIFO before completing */
2894 if (host->dir_status == DW_MCI_RECV_STATUS) {
2895 if (host->sg != NULL)
2896 dw_mci_read_data_pio(host, true);
2898 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2899 tasklet_schedule(&host->tasklet);
2902 if (pending & SDMMC_INT_RXDR) {
2903 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2904 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2905 dw_mci_read_data_pio(host, false);
2908 if (pending & SDMMC_INT_TXDR) {
2909 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2910 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2911 dw_mci_write_data_pio(host);
/* voltage-switch interrupt: treated like a command completion */
2914 if (pending & SDMMC_INT_VSI) {
2915 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2916 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2917 dw_mci_cmd_interrupt(host, pending);
2920 if (pending & SDMMC_INT_CMD_DONE) {
2921 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2922 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2923 dw_mci_cmd_interrupt(host, pending);
2926 if (pending & SDMMC_INT_CD) {
2927 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2928 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
/* hold a wakelock long enough for the detect work to run */
2929 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2930 queue_work(host->card_workqueue, &host->card_work);
2933 if (pending & SDMMC_INT_HLE) {
2934 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2935 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2939 /* Handle SDIO Interrupts */
2940 for (i = 0; i < host->num_slots; i++) {
2941 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in IP version 2.40a */
2943 if (host->verid < DW_MMC_240A)
2944 sdio_int = SDMMC_INT_SDIO(i);
2946 sdio_int = SDMMC_INT_SDIO(i + 8);
2948 if (pending & sdio_int) {
2949 mci_writel(host, RINTSTS, sdio_int);
2950 mmc_signal_sdio_irq(slot->mmc);
2956 #ifdef CONFIG_MMC_DW_IDMAC
2957 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2958 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2959 /* Handle DMA interrupts */
2960 pending = mci_readl(host, IDSTS);
2961 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2962 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2963 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2964 host->dma_ops->complete((void *)host);
/* Card-detect worker (queued from the CD interrupt and the cd-gpio IRQ).
 * For each slot: re-reads presence, optionally re-muxes the SD data pins
 * between their default function and the uart-debug ("udbg") state, and on
 * an actual state change resets the controller, fails any in-flight request
 * with -ENOMEDIUM, and tells the MMC core to rescan the bus. */
2972 static void dw_mci_work_routine_card(struct work_struct *work)
2974 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2977 for (i = 0; i < host->num_slots; i++) {
2978 struct dw_mci_slot *slot = host->slot[i];
2979 struct mmc_host *mmc = slot->mmc;
2980 struct mmc_request *mrq;
2983 present = dw_mci_get_cd(mmc);
2985 /* Card inserted: switch the data lines to their default function;
2986 card removed: switch to uart-debug, when a udbg tag exists in dts.
/* NOTE(review): the two dev_err strings below look swapped - the
 * pins_default branch reports "Udbg ... failed" and vice versa.  Fix
 * needs the surrounding (elided) lines; confirm against full source. */
2988 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
2990 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
2991 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
2992 mmc_hostname(host->mmc));
2994 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
2995 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
2996 mmc_hostname(host->mmc));
3000 while (present != slot->last_detect_state) {
3001 dev_dbg(&slot->mmc->class_dev, "card %s\n",
3002 present ? "inserted" : "removed");
3003 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
3004 present ? "inserted" : "removed.", mmc_hostname(mmc));
3006 dw_mci_ctrl_all_reset(host);
3007 /* Stop edma when rountine card triggered */
3008 if(cpu_is_rk3036() || cpu_is_rk312x())
3009 if(host->dma_ops && host->dma_ops->stop)
3010 host->dma_ops->stop(host);
3011 rk_send_wakeup_key();//wake up system
3012 spin_lock_bh(&host->lock);
3014 /* Card change detected */
3015 slot->last_detect_state = present;
3017 /* Clean up queue if present */
3020 if (mrq == host->mrq) {
/* request is mid-flight: fail it according to the FSM state */
3024 switch (host->state) {
3027 case STATE_SENDING_CMD:
3028 mrq->cmd->error = -ENOMEDIUM;
3032 case STATE_SENDING_DATA:
3033 mrq->data->error = -ENOMEDIUM;
3034 dw_mci_stop_dma(host);
3036 case STATE_DATA_BUSY:
3037 case STATE_DATA_ERROR:
3038 if (mrq->data->error == -EINPROGRESS)
3039 mrq->data->error = -ENOMEDIUM;
3043 case STATE_SENDING_STOP:
3044 mrq->stop->error = -ENOMEDIUM;
3048 dw_mci_request_end(host, mrq);
/* request was still queued: dequeue and fail all of its parts */
3050 list_del(&slot->queue_node);
3051 mrq->cmd->error = -ENOMEDIUM;
3053 mrq->data->error = -ENOMEDIUM;
3055 mrq->stop->error = -ENOMEDIUM;
3057 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3058 mrq->cmd->opcode, mmc_hostname(mmc));
3060 spin_unlock(&host->lock);
3061 mmc_request_done(slot->mmc, mrq);
3062 spin_lock(&host->lock);
3066 /* Power down slot */
3068 /* Clear down the FIFO */
3069 dw_mci_fifo_reset(host);
3070 #ifdef CONFIG_MMC_DW_IDMAC
3071 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3072 dw_mci_idmac_reset(host);
3077 spin_unlock_bh(&host->lock);
/* re-read presence; loop again if it changed while we worked */
3079 present = dw_mci_get_cd(mmc);
3082 mmc_detect_change(slot->mmc,
3083 msecs_to_jiffies(host->pdata->detect_delay_ms));
3088 /* given a slot id, find out the device node representing that slot */
/* Scans the controller's DT children for one whose "reg" property equals
 * @slot; returns that node (behavior on no match is in elided lines). */
3089 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3091 struct device_node *np;
3095 if (!dev || !dev->of_node)
3098 for_each_child_of_node(dev->of_node, np) {
3099 addr = of_get_property(np, "reg", &len);
3100 if (!addr || (len < sizeof(int)))
3102 if (be32_to_cpup(addr) == slot)
/* Table mapping per-slot DT property names to slot quirk flags. */
3108 static struct dw_mci_of_slot_quirks {
3111 } of_slot_quirks[] = {
3113 .quirk = "disable-wp",
3114 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/* Collect the quirk flags for @slot by checking each of_slot_quirks entry
 * against the slot's DT node. */
3118 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3120 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3125 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3126 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3127 quirks |= of_slot_quirks[idx].id;
3132 /* find out bus-width for a given slot */
/* Reads "bus-width" from the controller node (slot-node lookup is commented
 * out); logs and presumably falls back to a default when absent - the
 * fallback value is in elided lines. */
3133 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3135 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3141 if (of_property_read_u32(np, "bus-width", &bus_wd))
3142 dev_err(dev, "bus-width property not found, assuming width"
3148 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/* Looks up "pwr-gpios" on the controller node, requests it, and drives it
 * low (power-enable asserted).  Missing property is not an error. */
3149 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3151 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3157 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3159 /* Having a missing entry is valid; return silently */
3160 if (!gpio_is_valid(gpio))
3163 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3164 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3168 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3174 /* find the write protect gpio for a given slot; or -1 if none specified */
/* Looks up "wp-gpios" on the slot's DT node and requests it; missing entry
 * is valid and handled silently. */
3175 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3177 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3183 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3185 /* Having a missing entry is valid; return silently */
3186 if (!gpio_is_valid(gpio))
3189 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3190 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3197 /* find the cd gpio for a given slot */
/* Looks up "cd-gpios" on the controller node and registers it with the MMC
 * core's slot-gpio card-detect helper (mmc_gpio_request_cd). */
3198 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3199 struct mmc_host *mmc)
3201 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3207 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3209 /* Having a missing entry is valid; return silently */
3210 if (!gpio_is_valid(gpio))
3213 if (mmc_gpio_request_cd(mmc, gpio, 0))
3214 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/* Threaded IRQ handler for the dedicated card-detect GPIO (used on low-end
 * SoCs, see dw_mci_init_slot).  Flips the trigger polarity so the next edge
 * of the opposite level is caught, wakes the system, and queues the detect
 * work unless rescan is disabled (suspend in progress). */
3217 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3219 struct mmc_host *mmc = dev_id;
3220 struct dw_mci_slot *slot = mmc_priv(mmc);
3221 struct dw_mci *host = slot->host;
3222 int gpio_cd = slot->cd_gpio;
/* NOTE(review): irq_set_irq_type() expects IRQ_TYPE_LEVEL_* flags, but
 * IRQF_TRIGGER_* | IRQF_ONESHOT (request_irq flags) are passed here.  The
 * numeric TRIGGER values alias the TYPE ones, but IRQF_ONESHOT does not
 * belong in this call - confirm against full source before changing. */
3224 (gpio_get_value(gpio_cd) == 0) ?
3225 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
3226 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
3228 /* wakeup system whether gpio debounce or not */
3229 rk_send_wakeup_key();
3231 /* no need to trigger detect flow when rescan is disabled.
3232 This case happened in dpm: we just wake the system up and
3233 let the suspend_post notify callback handle it.
3235 if(mmc->rescan_disable == 0)
3236 queue_work(host->card_workqueue, &host->card_work);
3238 printk("%s: rescan been disabled!\n", __FUNCTION__);
/* Convert the card-detect @gpio to an IRQ, install dw_mci_gpio_cd_irqt as a
 * threaded handler (initial trigger: low level, oneshot), and mark it as a
 * wakeup source so card insertion can wake the system from suspend. */
3243 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3244 struct mmc_host *mmc)
3246 struct dw_mci_slot *slot = mmc_priv(mmc);
3247 struct dw_mci *host = slot->host;
3251 /* Having a missing entry is valid; return silently */
3252 if (!gpio_is_valid(gpio))
3255 irq = gpio_to_irq(gpio);
3257 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3258 NULL, dw_mci_gpio_cd_irqt,
3259 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
3263 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3265 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3266 enable_irq_wake(irq);
3269 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/* Undo dw_mci_of_set_cd_gpio_irq(): release the card-detect IRQ and the
 * GPIO itself (devm-managed, so this is for early-exit error paths). */
3273 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3274 struct mmc_host *mmc)
3276 if (!gpio_is_valid(gpio))
3279 if (gpio_to_irq(gpio) >= 0) {
3280 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3281 devm_gpio_free(&mmc->class_dev, gpio);
3284 #else /* CONFIG_OF */
/* No-op fallbacks used when the kernel is built without device-tree
 * support; return bodies are in elided lines. */
3285 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3289 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3293 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3297 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3301 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3302 struct mmc_host *mmc)
3306 #endif /* CONFIG_OF */
3308 /* @host: dw_mci host prvdata
3309 * Init pinctrl for each platform. Usually we assign
3310 * "default" tag for functional usage, "idle" tag for gpio
3311 * state and "udbg" tag for uart_dbg if any.
/* Skipped entirely for eMMC; looks up the "idle", "default" and (SD only)
 * "udbg" pinctrl states, applying idle then default; if no card is present
 * on an SD slot the pins are parked in the udbg (uart-debug) state. */
3313 static void dw_mci_init_pinctrl(struct dw_mci *host)
3315 /* Fixme: DON'T TOUCH EMMC SETTING! */
3316 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3319 /* Get pinctrl for DTS */
3320 host->pinctrl = devm_pinctrl_get(host->dev);
3321 if (IS_ERR(host->pinctrl)) {
3322 dev_err(host->dev, "%s: No pinctrl used!\n",
3323 mmc_hostname(host->mmc));
3327 /* Lookup idle state */
3328 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3329 PINCTRL_STATE_IDLE);
3330 if (IS_ERR(host->pins_idle)) {
3331 dev_err(host->dev, "%s: No idle tag found!\n",
3332 mmc_hostname(host->mmc));
3334 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3335 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3336 mmc_hostname(host->mmc));
3339 /* Lookup default state */
3340 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3341 PINCTRL_STATE_DEFAULT);
3342 if (IS_ERR(host->pins_default)) {
3343 dev_err(host->dev, "%s: No default pinctrl found!\n",
3344 mmc_hostname(host->mmc));
3346 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3347 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3348 mmc_hostname(host->mmc));
3351 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3352 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3353 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3354 if (IS_ERR(host->pins_udbg)) {
3355 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3356 mmc_hostname(host->mmc));
/* no card present at boot: hand the shared pins to uart debug */
3358 if (!dw_mci_get_cd(host->mmc))
3359 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3360 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3361 mmc_hostname(host->mmc));
/* PM notifier registered for SD slots (see dw_mci_init_slot).  On suspend
 * prepare: disable bus rescan and cancel any pending detect work (dropping
 * its wakelock).  On resume: re-enable rescan and schedule a detect pass so
 * cards swapped while asleep are noticed. */
3366 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3367 unsigned long mode, void *unused)
3369 struct mmc_host *host = container_of(
3370 notify_block, struct mmc_host, pm_notify);
3371 unsigned long flags;
3374 case PM_HIBERNATION_PREPARE:
3375 case PM_SUSPEND_PREPARE:
3376 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3377 spin_lock_irqsave(&host->lock, flags);
3378 host->rescan_disable = 1;
3379 spin_unlock_irqrestore(&host->lock, flags);
3380 if (cancel_delayed_work(&host->detect))
3381 wake_unlock(&host->detect_wake_lock);
3384 case PM_POST_SUSPEND:
3385 case PM_POST_HIBERNATION:
3386 case PM_POST_RESTORE:
3387 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3388 spin_lock_irqsave(&host->lock, flags);
3389 host->rescan_disable = 0;
3390 spin_unlock_irqrestore(&host->lock, flags);
/* 10 ms debounce before the rescan kicks in */
3391 mmc_detect_change(host, 10);
/* Allocate and register the mmc_host for slot @id: reads DT/platform data
 * for frequency limits, card-type restrictions, bus width, caps and quirks;
 * registers a PM notifier for SD; wires up gpio-based card detect on
 * low-end SoCs; acquires the vmmc regulator; sets block-geometry limits for
 * IDMAC / external-edma / PIO; initializes pinctrl and finally calls
 * mmc_add_host().  Error paths unwind the PM notifier and cd-gpio IRQ. */
3397 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3399 struct mmc_host *mmc;
3400 struct dw_mci_slot *slot;
3401 const struct dw_mci_drv_data *drv_data = host->drv_data;
3406 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3410 slot = mmc_priv(mmc);
3414 host->slot[id] = slot;
3417 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3419 mmc->ops = &dw_mci_ops;
/* "clock-freq-min-max" = <min max> in DT; fall back to driver defaults */
3421 if (of_property_read_u32_array(host->dev->of_node,
3422 "clock-freq-min-max", freq, 2)) {
3423 mmc->f_min = DW_MCI_FREQ_MIN;
3424 mmc->f_max = DW_MCI_FREQ_MAX;
3426 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3427 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3429 mmc->f_min = freq[0];
3430 mmc->f_max = freq[1];
3432 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3433 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3436 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
/* restrict_caps pins each controller instance to one card type */
3438 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3439 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3440 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3441 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3442 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3443 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3445 if (of_find_property(host->dev->of_node, "supports-tSD", NULL))
3446 mmc->restrict_caps |= RESTRICT_CARD_TYPE_TSD;
/* SD slots track suspend/resume to gate rescans (see dw_mci_pm_notify) */
3448 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3449 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3450 if (register_pm_notifier(&mmc->pm_notify)) {
3451 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3452 goto err_pm_notifier;
3456 /* We assume only low-level chip use gpio_cd */
3457 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
3458 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3459 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3460 if (gpio_is_valid(slot->cd_gpio)) {
3461 /* Request gpio int for card detection */
3462 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3464 slot->cd_gpio = -ENODEV;
3465 dev_err(host->dev, "failed to get your cd-gpios!\n");
3469 if (host->pdata->get_ocr)
3470 mmc->ocr_avail = host->pdata->get_ocr(id);
/* default OCR: advertise every voltage window from 1.65 V to 3.6 V */
3473 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3474 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3475 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3476 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3480 * Start with slot power disabled, it will be enabled when a card
3483 if (host->pdata->setpower)
3484 host->pdata->setpower(id, 0);
3486 if (host->pdata->caps)
3487 mmc->caps = host->pdata->caps;
3489 if (host->pdata->pm_caps)
3490 mmc->pm_caps = host->pdata->pm_caps;
/* controller index comes from the "mshc" DT alias, or pdev id otherwise */
3492 if (host->dev->of_node) {
3493 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3497 ctrl_id = to_platform_device(host->dev)->id;
3499 if (drv_data && drv_data->caps)
3500 mmc->caps |= drv_data->caps[ctrl_id];
3501 if (drv_data && drv_data->hold_reg_flag)
3502 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3504 /* set the compatibility of driver. */
3505 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3506 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3508 if (host->pdata->caps2)
3509 mmc->caps2 = host->pdata->caps2;
3511 if (host->pdata->get_bus_wd)
3512 bus_width = host->pdata->get_bus_wd(slot->id);
3513 else if (host->dev->of_node)
3514 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3518 switch (bus_width) {
3520 mmc->caps |= MMC_CAP_8_BIT_DATA;
/* NOTE(review): 8-bit presumably falls through to also set 4-bit;
 * the case labels are in elided lines - confirm. */
3522 mmc->caps |= MMC_CAP_4_BIT_DATA;
3525 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3526 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3527 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3528 mmc->caps |= MMC_CAP_SDIO_IRQ;
3529 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3530 mmc->caps |= MMC_CAP_HW_RESET;
3531 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3532 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3533 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3534 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3535 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3536 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3537 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3538 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3540 /*Assign pm_caps pass to pm_flags*/
3541 mmc->pm_flags = mmc->pm_caps;
3543 if (host->pdata->blk_settings) {
3544 mmc->max_segs = host->pdata->blk_settings->max_segs;
3545 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3546 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3547 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3548 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3550 /* Useful defaults if platform data is unset. */
3551 #ifdef CONFIG_MMC_DW_IDMAC
3552 mmc->max_segs = host->ring_size;
3553 mmc->max_blk_size = 65536;
3554 mmc->max_blk_count = host->ring_size;
3555 mmc->max_seg_size = 0x1000;
3556 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3557 if(cpu_is_rk3036() || cpu_is_rk312x()){
3558 /* fixup for external dmac setting */
3560 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3561 mmc->max_blk_count = 65535;
3562 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3563 mmc->max_seg_size = mmc->max_req_size;
/* PIO fallback geometry (no IDMAC configured) */
3567 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3568 mmc->max_blk_count = 512;
3569 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3570 mmc->max_seg_size = mmc->max_req_size;
3571 #endif /* CONFIG_MMC_DW_IDMAC */
3575 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3577 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* only SD slots use the vmmc regulator on this platform */
3582 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3583 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3587 if (IS_ERR(host->vmmc)) {
3588 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3591 ret = regulator_enable(host->vmmc);
3594 "failed to enable regulator: %d\n", ret);
3601 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3603 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3604 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3606 dw_mci_init_pinctrl(host);
3607 ret = mmc_add_host(mmc);
3611 #if defined(CONFIG_DEBUG_FS)
3612 dw_mci_init_debugfs(slot);
3615 /* Card initially undetected */
3616 slot->last_detect_state = 1;
/* error unwind: drop the PM notifier, then the cd-gpio IRQ */
3620 unregister_pm_notifier(&mmc->pm_notify);
3623 if (gpio_is_valid(slot->cd_gpio))
3624 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/* Tear down one slot: run the platform exit hook, unregister the mmc_host
 * from the core, clear the slot pointer, and free the host allocation. */
3629 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3631 /* Shutdown detect IRQ */
3632 if (slot->host->pdata->exit)
3633 slot->host->pdata->exit(id);
3635 /* Debugfs stuff is cleaned up by mmc core */
3636 mmc_remove_host(slot->mmc);
3637 slot->host->slot[id] = NULL;
3638 mmc_free_host(slot->mmc);
/* Set up DMA for the host: allocate one page of coherent memory for the
 * descriptor/sg translation area, pick the DMA backend (external edma on
 * rk3036/rk312x, internal IDMAC otherwise), and run its init hook.  Any
 * failure drops back to PIO mode (elided fall-through at the end). */
3641 static void dw_mci_init_dma(struct dw_mci *host)
3643 /* Alloc memory for sg translation */
3644 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3645 &host->sg_dma, GFP_KERNEL);
3646 if (!host->sg_cpu) {
3647 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3652 memset(host->sg_cpu, 0, PAGE_SIZE);
3655 /* Determine which DMA interface to use */
3656 #if defined(CONFIG_MMC_DW_IDMAC)
3657 if(cpu_is_rk3036() || cpu_is_rk312x()){
3658 host->dma_ops = &dw_mci_edmac_ops;
3659 dev_info(host->dev, "Using external DMA controller.\n");
3661 host->dma_ops = &dw_mci_idmac_ops;
3662 dev_info(host->dev, "Using internal DMA controller.\n");
/* the backend must provide the full ops set or DMA is unusable */
3669 if (host->dma_ops->init && host->dma_ops->start &&
3670 host->dma_ops->stop && host->dma_ops->cleanup) {
3671 if (host->dma_ops->init(host)) {
3672 dev_err(host->dev, "%s: Unable to initialize "
3673 "DMA Controller.\n", __func__);
3677 dev_err(host->dev, "DMA initialization not found.\n");
3685 dev_info(host->dev, "Using PIO mode.\n");
/* Assert the given @reset bit(s) in CTRL and poll (up to 500 ms) for the
 * hardware to self-clear them; logs a timeout otherwise.  Returns success
 * as bool (return statements are in elided lines). */
3690 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3692 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3695 ctrl = mci_readl(host, CTRL);
3697 mci_writel(host, CTRL, ctrl);
3699 /* wait till resets clear */
3701 ctrl = mci_readl(host, CTRL);
3702 if (!(ctrl & reset))
3704 } while (time_before(jiffies, timeout));
3707 "Timeout resetting block (ctrl reset %#x)\n",
/* Reset only the data FIFO, first stopping the sg_miter walk because the
 * reset raises an interrupt while host->sg is being cleared. */
3713 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3716 * Reseting generates a block interrupt, hence setting
3717 * the scatter-gather pointer to NULL.
3720 sg_miter_stop(&host->sg_miter);
3724 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full controller reset: FIFO, controller core, and (internal) DMA. */
3727 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3729 return dw_mci_ctrl_reset(host,
3730 SDMMC_CTRL_FIFO_RESET |
3732 SDMMC_CTRL_DMA_RESET);
/* Table mapping host-level DT property names to host quirk flags. */
3737 static struct dw_mci_of_quirks {
3742 .quirk = "broken-cd",
3743 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/* Build a dw_mci_board platform-data struct from the controller's DT node:
 * slot count, quirk flags (of_quirks table), FIFO depth, card-detect delay,
 * bus clock, driver-specific parse hook, and the full set of cap/cap2/
 * pm_caps property translations.  Returns ERR_PTR on allocation or
 * sub-parser failure. */
3747 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3749 struct dw_mci_board *pdata;
3750 struct device *dev = host->dev;
3751 struct device_node *np = dev->of_node;
3752 const struct dw_mci_drv_data *drv_data = host->drv_data;
3754 u32 clock_frequency;
3756 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3758 dev_err(dev, "could not allocate memory for pdata\n");
3759 return ERR_PTR(-ENOMEM);
3762 /* find out number of slots supported */
3763 if (of_property_read_u32(dev->of_node, "num-slots",
3764 &pdata->num_slots)) {
3765 dev_info(dev, "num-slots property not found, "
3766 "assuming 1 slot is available\n");
3767 pdata->num_slots = 1;
3771 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3772 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3773 pdata->quirks |= of_quirks[idx].id;
3776 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3777 dev_info(dev, "fifo-depth property not found, using "
3778 "value of FIFOTH register as default\n")
3780 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3782 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3783 pdata->bus_hz = clock_frequency;
/* give the SoC-specific driver a chance to parse its own properties */
3785 if (drv_data && drv_data->parse_dt) {
3786 ret = drv_data->parse_dt(host);
3788 return ERR_PTR(ret);
3791 if (of_find_property(np, "keep-power-in-suspend", NULL))
3792 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3794 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3795 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3797 if (of_find_property(np, "supports-highspeed", NULL))
3798 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3800 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3801 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3803 if (of_find_property(np, "supports-DDR_MODE", NULL))
3804 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3806 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3807 pdata->caps2 |= MMC_CAP2_HS200;
3809 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3810 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3812 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3813 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3815 if (of_get_property(np, "cd-inverted", NULL))
3816 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3817 if (of_get_property(np, "bootpart-no-access", NULL))
3818 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3823 #else /* CONFIG_OF */
/*
 * Stub used when the kernel is built without device-tree support:
 * pdata can only come from the DT on this platform, so fail the probe.
 */
3824 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3826 return ERR_PTR(-EINVAL);
3828 #endif /* CONFIG_OF */
/*
 * dw_mci_probe - one-time initialization of a DesignWare MMC host.
 *
 * Order of operations as visible here: parse DT pdata, detect the
 * controller version (data-register offset differs from IP 2.40a on),
 * acquire and enable the AHB and CIU clocks, size the FIFO, reset the
 * controller, set up DMA, register the IRQ handler and a card workqueue,
 * initialize each slot, then unmask the interrupt sources the driver
 * handles. Returns 0 on success or a negative errno; the error paths at
 * the bottom unwind in reverse order (their goto labels are elided from
 * this listing).
 *
 * NOTE(review): elided listing — `else` branches, goto labels, closing
 * braces and some declarations (e.g. fifo_size, regs, init_slots) are
 * not visible here.
 */
3830 int dw_mci_probe(struct dw_mci *host)
3832 const struct dw_mci_drv_data *drv_data = host->drv_data;
3833 int width, i, ret = 0;
/* Platform data comes exclusively from the device tree on this SoC. */
3839 host->pdata = dw_mci_parse_dt(host);
3840 if (IS_ERR(host->pdata)) {
3841 dev_err(host->dev, "platform data not available\n");
/* Multi-slot operation needs a slot-mux callback from pdata. */
3846 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3848 "Platform data must supply select_slot function\n");
3853 * In 2.40a spec, Data offset is changed.
3854 * Need to check the version-id and set data-offset for DATA register.
3856 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3857 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3859 if (host->verid < DW_MMC_240A)
3860 host->data_offset = DATA_OFFSET;
3862 host->data_offset = DATA_240A_OFFSET;
/* AHB (register interface) clock; managed by devm, so no explicit put. */
3865 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3866 if (IS_ERR(host->hclk_mmc)) {
3867 dev_err(host->dev, "failed to get hclk_mmc\n");
3868 ret = PTR_ERR(host->hclk_mmc);
3872 clk_prepare_enable(host->hclk_mmc);
/* Card interface unit (CIU) clock that feeds the card bus divider. */
3875 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3876 if (IS_ERR(host->clk_mmc)) {
3877 dev_err(host->dev, "failed to get clk mmc_per\n");
3878 ret = PTR_ERR(host->clk_mmc);
/* bus_hz must have been supplied via DT "clock-frequency"/variant hook. */
3882 host->bus_hz = host->pdata->bus_hz;
3883 if (!host->bus_hz) {
3884 dev_err(host->dev,"Platform data must supply bus speed\n");
/* Pre-2.40a parts take bus_hz directly; newer Rockchip integration has a
 * fixed /2 divider upstream of the controller, so request 2x. */
3889 if (host->verid < DW_MMC_240A)
3890 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3892 //rockchip: fix divider 2 in clksum before controlller
3893 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3896 dev_err(host->dev, "failed to set clk mmc\n");
3899 clk_prepare_enable(host->clk_mmc);
/* Optional SoC-variant clock setup (tuning clocks etc.). */
3901 if (drv_data && drv_data->setup_clock) {
3902 ret = drv_data->setup_clock(host);
3905 "implementation specific clock setup failed\n");
/* Cache quirks and reset per-host software state. */
3910 host->quirks = host->pdata->quirks;
3911 host->irq_state = true;
3912 host->set_speed = 0;
3914 host->svi_flags = 0;
3916 spin_lock_init(&host->lock);
3917 spin_lock_init(&host->slock);
/* Queue of mmc requests shared across slots. */
3919 INIT_LIST_HEAD(&host->queue);
3921 * Get the host data width - this assumes that HCON has been set with
3922 * the correct values.
3924 i = (mci_readl(host, HCON) >> 7) & 0x7;
/* HCON[9:7]: 0 = 16-bit, 2 = 64-bit, anything else treated as 32-bit. */
3926 host->push_data = dw_mci_push_data16;
3927 host->pull_data = dw_mci_pull_data16;
3929 host->data_shift = 1;
3930 } else if (i == 2) {
3931 host->push_data = dw_mci_push_data64;
3932 host->pull_data = dw_mci_pull_data64;
3934 host->data_shift = 3;
3936 /* Check for a reserved value, and warn if it is */
3938 "HCON reports a reserved host data width!\n"
3939 "Defaulting to 32-bit access.\n");
3940 host->push_data = dw_mci_push_data32;
3941 host->pull_data = dw_mci_pull_data32;
3943 host->data_shift = 2;
3946 /* Reset all blocks */
3947 if (!dw_mci_ctrl_all_reset(host))
/* DMA setup: pdata may override the default dma_ops. */
3950 host->dma_ops = host->pdata->dma_ops;
3951 dw_mci_init_dma(host);
3953 /* Clear the interrupts for the host controller */
3954 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3955 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3957 /* Put in max timeout */
3958 mci_writel(host, TMOUT, 0xFFFFFFFF);
3961 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3962 * Tx Mark = fifo_size / 2 DMA Size = 8
3964 if (!host->pdata->fifo_depth) {
3966 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3967 * have been overwritten by the bootloader, just like we're
3968 * about to do, so if you know the value for your hardware, you
3969 * should put it in the platform data.
3971 fifo_size = mci_readl(host, FIFOTH);
3972 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3974 fifo_size = host->pdata->fifo_depth;
3976 host->fifo_depth = fifo_size;
3978 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3979 mci_writel(host, FIFOTH, host->fifoth_val);
3981 /* disable clock to CIU */
3982 mci_writel(host, CLKENA, 0);
3983 mci_writel(host, CLKSRC, 0);
/* Bottom-half tasklet plus a dedicated single-threaded workqueue for
 * card insert/remove handling. */
3985 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3986 host->card_workqueue = alloc_workqueue("dw-mci-card",
3987 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
3988 if (!host->card_workqueue) {
3992 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
3993 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3994 host->irq_flags, "dw-mci", host);
/* Slot count: pdata wins; otherwise read it from HCON[5:1]+1. */
3998 if (host->pdata->num_slots)
3999 host->num_slots = host->pdata->num_slots;
4001 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
4003 /* We need at least one slot to succeed */
4004 for (i = 0; i < host->num_slots; i++) {
4005 ret = dw_mci_init_slot(host, i);
4007 dev_dbg(host->dev, "slot %d init failed\n", i);
4013 * Enable interrupts for command done, data over, data empty, card det,
4014 * receive ready and error such as transmit, receive timeout, crc error
4016 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4017 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
4018 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect interrupt only makes sense for removable (SD) slots, not
 * SDIO or soldered eMMC. */
4019 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
4020 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
4021 regs |= SDMMC_INT_CD;
4023 mci_writel(host, INTMASK, regs);
4025 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4027 dev_info(host->dev, "DW MMC controller at irq %d, "
4028 "%d bit host data width, "
4030 host->irq, width, fifo_size);
4033 dev_info(host->dev, "%d slots initialized\n", init_slots);
4035 dev_dbg(host->dev, "attempted to initialize %d slots, "
4036 "but failed on all\n", host->num_slots);
4041 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4042 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* --- error unwind (goto labels elided): reverse-order teardown --- */
4047 destroy_workqueue(host->card_workqueue);
4050 if (host->use_dma && host->dma_ops->exit)
4051 host->dma_ops->exit(host);
4054 regulator_disable(host->vmmc);
4055 regulator_put(host->vmmc);
4059 if (!IS_ERR(host->clk_mmc))
4060 clk_disable_unprepare(host->clk_mmc);
4062 if (!IS_ERR(host->hclk_mmc))
4063 clk_disable_unprepare(host->hclk_mmc);
4067 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - tear down a host previously set up by dw_mci_probe().
 *
 * Masks and clears all interrupts, cleans up every slot, gates the CIU
 * clock, destroys the card workqueue, releases DMA, the card-detect GPIO
 * IRQ, the vmmc regulator and finally the clocks — i.e. probe() in
 * reverse.
 *
 * NOTE(review): elided listing — closing braces and some intermediate
 * lines are not visible here.
 */
4069 void dw_mci_remove(struct dw_mci *host)
4071 struct mmc_host *mmc = host->mmc;
4072 struct dw_mci_slot *slot = mmc_priv(mmc);
/* Quiesce the controller before touching any slot state. */
4075 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4076 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4078 for(i = 0; i < host->num_slots; i++){
4079 dev_dbg(host->dev, "remove slot %d\n", i);
4081 dw_mci_cleanup_slot(host->slot[i], i);
4084 /* disable clock to CIU */
4085 mci_writel(host, CLKENA, 0);
4086 mci_writel(host, CLKSRC, 0);
4088 destroy_workqueue(host->card_workqueue);
/* SD hosts registered a PM notifier elsewhere; unregister it here. */
4089 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4090 unregister_pm_notifier(&host->mmc->pm_notify);
4092 if(host->use_dma && host->dma_ops->exit)
4093 host->dma_ops->exit(host);
/* Release the card-detect GPIO interrupt if one was wired up. */
4095 if (gpio_is_valid(slot->cd_gpio))
4096 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4099 regulator_disable(host->vmmc);
4100 regulator_put(host->vmmc);
4102 if(!IS_ERR(host->clk_mmc))
4103 clk_disable_unprepare(host->clk_mmc);
4105 if(!IS_ERR(host->hclk_mmc))
4106 clk_disable_unprepare(host->hclk_mmc);
4108 EXPORT_SYMBOL(dw_mci_remove);
4112 #ifdef CONFIG_PM_SLEEP
4114 * TODO: we should probably disable the clock to the card in the suspend path.
/* Provided by the Rockchip WLAN glue; used to special-case WiFi chips
 * whose SDIO hosts must not be suspended. */
4116 extern int get_wifi_chip_type(void);
/*
 * dw_mci_suspend - system-sleep suspend hook for a DW MMC host.
 *
 * Skips suspend entirely for SDIO hosts driving ESP8089/Realtek WiFi
 * modules (the early-return branch is elided from this listing), drops
 * the vmmc regulator, and for SD-card hosts additionally disables the
 * IRQ, moves the pins to their idle pinctrl state, masks the controller
 * and arms the card-detect GPIO as a wakeup source.
 *
 * NOTE(review): elided listing — braces and the return statements are
 * not visible here.
 */
4117 int dw_mci_suspend(struct dw_mci *host)
4119 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4120 (get_wifi_chip_type() == WIFI_ESP8089 || get_wifi_chip_type() == WIFI_RTKWIFI))
4124 regulator_disable(host->vmmc);
4126 /*only for sdmmc controller*/
4127 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4128 disable_irq(host->irq);
4129 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4130 MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
4131 mmc_hostname(host->mmc));
/* Fully quiesce the controller: ack everything, mask everything,
 * disable the IP. */
4133 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4134 mci_writel(host, INTMASK, 0x00);
4135 mci_writel(host, CTRL, 0x00);
4137 /* Soc rk3126/3036 already in gpio_cd mode */
4138 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4139 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4140 enable_irq_wake(host->mmc->slot.cd_irq);
4145 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - system-sleep resume hook; mirrors dw_mci_suspend().
 *
 * Skips resume for the same WiFi-SDIO special cases as suspend, restores
 * pinctrl/card-detect state for SD hosts, reprograms per-SoC GRF card
 * detect routing, re-enables vmmc, resets the controller, re-initializes
 * DMA, restores FIFOTH/TMOUT/INTMASK and finally re-runs set_ios/bus
 * setup for slots that kept power across suspend.
 *
 * NOTE(review): elided listing — braces, the `if(cpu_is_rk3288())` line
 * preceding the first grf_writel(), goto/return lines and some
 * declarations (e.g. regs) are not visible here.
 */
4147 int dw_mci_resume(struct dw_mci *host)
4149 int i, ret, retry_cnt = 0;
4151 struct dw_mci_slot *slot;
/* Same WiFi-module exception as in dw_mci_suspend(). */
4153 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4154 (get_wifi_chip_type() == WIFI_ESP8089 || get_wifi_chip_type() == WIFI_RTKWIFI))
/* SDIO: nothing to resume if no card/function is present. */
4159 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
4160 slot = mmc_priv(host->mmc);
4161 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4165 /*only for sdmmc controller*/
4166 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4167 /* Soc rk3126/3036 already in gpio_cd mode */
4168 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4169 disable_irq_wake(host->mmc->slot.cd_irq);
4170 mmc_gpio_free_cd(host->mmc);
4172 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4173 MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
4174 mmc_hostname(host->mmc));
/* Per-SoC GRF writes: upper halfword is the write-enable mask, lower
 * halfword the value — here clearing one routing bit per SoC. */
4178 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4179 else if(cpu_is_rk3036())
4180 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4181 else if(cpu_is_rk312x())
4182 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4183 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4186 ret = regulator_enable(host->vmmc);
4189 "failed to enable regulator: %d\n", ret);
/* Controller must come out of reset cleanly before reprogramming. */
4194 if(!dw_mci_ctrl_all_reset(host)){
/* rk3036/rk312x skip DMA re-init here (PIO-only on those SoCs —
 * presumably; confirm against dw_mci_init_dma). */
4199 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
4200 if(host->use_dma && host->dma_ops->init)
4201 host->dma_ops->init(host);
4204 * Restore the initial value at FIFOTH register
4205 * And Invalidate the prev_blksz with zero
4207 mci_writel(host, FIFOTH, host->fifoth_val);
4208 host->prev_blksz = 0;
4209 /* Put in max timeout */
4210 mci_writel(host, TMOUT, 0xFFFFFFFF);
/* Re-arm the same interrupt set dw_mci_probe() enabled. */
4212 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4213 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
4215 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4216 regs |= SDMMC_INT_CD;
4217 mci_writel(host, INTMASK, regs);
4218 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4219 /*only for sdmmc controller*/
/* Re-enable the host IRQ only on the first pass (retry_cnt == 0). */
4220 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
4221 enable_irq(host->irq);
/* Slots that kept power during suspend need their ios and bus clock
 * re-applied. */
4224 for(i = 0; i < host->num_slots; i++){
4225 struct dw_mci_slot *slot = host->slot[i];
4228 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4229 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4230 dw_mci_setup_bus(slot, true);
4236 EXPORT_SYMBOL(dw_mci_resume);
4237 #endif /* CONFIG_PM_SLEEP */
/*
 * Module init/exit: the core driver has no work to do at load time beyond
 * announcing itself — actual hosts are bound via the platform driver.
 */
4239 static int __init dw_mci_init(void)
4241 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4245 static void __exit dw_mci_exit(void)
4249 module_init(dw_mci_init);
4250 module_exit(dw_mci_exit);
/* Module metadata. */
4252 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4253 MODULE_AUTHOR("NXP Semiconductor VietNam");
4254 MODULE_AUTHOR("Imagination Technologies Ltd");
4255 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4256 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4257 MODULE_LICENSE("GPL v2");