2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
49 #include <linux/mfd/syscon.h>
50 #include <linux/regmap.h>
51 #include <linux/log2.h>
53 #include "rk_sdmmc_dbg.h"
54 #include <linux/regulator/rockchip_io_vol_domain.h>
55 #include "../../clk/rockchip/clk-ops.h"
/* Driver version string and interrupt/timeout/frequency constants. */
57 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
59 /* Common flag combinations */
60 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
61 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
63 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
65 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
66 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
/* Transfer direction markers stored in host->dir_status. */
67 #define DW_MCI_SEND_STATUS 1
68 #define DW_MCI_RECV_STATUS 2
/* Transfers smaller than this many bytes are done in PIO, not DMA. */
69 #define DW_MCI_DMA_THRESHOLD 16
71 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
72 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
/* Data/busy timeouts in milliseconds, per card type (spec max is 250ms;
 * values are padded for misbehaving cards). */
74 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
75 #define SDMMC_DATA_TIMEOUT_SD 500
76 #define SDMMC_DATA_TIMEOUT_SDIO 250
77 #define SDMMC_DATA_TIMEOUT_EMMC 2500
79 #define SDMMC_CMD_RTO_MAX_HOLD 200
80 #define SDMMC_WAIT_FOR_UNBUSY 2500
/* Size/count of the register window dumped by debug helpers. */
82 #define DW_REGS_SIZE (0x0098 + 4)
83 #define DW_REGS_NUM (0x0098 / 4)
/* Internal DMA controller (IDMAC) descriptor layout and control bits. */
85 #ifdef CONFIG_MMC_DW_IDMAC
86 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
87 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
88 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
92 u32 des0; /* Control Descriptor */
/* des0 control/status bits: chained mode (CH), first/last descriptor
 * (FD/LD), end-of-ring (ER), card error summary (CES), hardware
 * ownership (OWN), disable-interrupt-on-completion (DIC). */
93 #define IDMAC_DES0_DIC BIT(1)
94 #define IDMAC_DES0_LD BIT(2)
95 #define IDMAC_DES0_FD BIT(3)
96 #define IDMAC_DES0_CH BIT(4)
97 #define IDMAC_DES0_ER BIT(5)
98 #define IDMAC_DES0_CES BIT(30)
99 #define IDMAC_DES0_OWN BIT(31)
101 u32 des1; /* Buffer sizes */
102 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
103 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
/* Buffer 1 address; des3 holds buffer 2 or, in chained mode, the
 * physical address of the next descriptor. */
105 u32 des2; /* buffer 1 physical address */
107 u32 des3; /* buffer 2 physical address */
109 #endif /* CONFIG_MMC_DW_IDMAC */
/* Standard 4-bit-bus tuning block pattern (SD/eMMC spec) sent by the
 * card during CMD19/CMD21 tuning; compared against received data. */
111 static const u8 tuning_blk_pattern_4bit[] = {
112 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
113 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
114 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
115 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
116 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
117 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
118 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
119 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard 8-bit-bus tuning block pattern (eMMC spec), used the same
 * way as the 4-bit pattern above but for 8-bit data buses. */
122 static const u8 tuning_blk_pattern_8bit[] = {
123 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
124 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
125 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
126 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
127 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
128 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
129 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
130 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
131 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
132 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
133 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
134 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
135 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
136 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
137 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
138 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Name/offset table of controller registers, walked by
 * dw_mci_regs_printk() below for diagnostic dumps. */
142 static struct sdmmc_reg dw_mci_regs[] =
146 { 0x0008, "CLKDIV" },
147 { 0x000C, "CLKSRC" },
148 { 0x0010, "CLKENA" },
151 { 0x001C, "BLKSIZ" },
152 { 0x0020, "BYTCNT" },
153 { 0x0024, "INTMASK" },
154 { 0x0028, "CMDARG" },
160 { 0x0040, "MINSTS" },
161 { 0x0044, "RINTSTS" },
162 { 0x0048, "STATUS" },
163 { 0x004C, "FIFOTH" },
164 { 0x0050, "CDETECT" },
165 { 0x0054, "WRTPRT" },
167 { 0x005C, "TCBCNT" },
168 { 0x0060, "TBBCNT" },
169 { 0x0064, "DEBNCE" },
173 { 0x0074, "UHS_REG" },
176 { 0x0084, "PLDMND" },
177 { 0x0088, "DBADDR" },
179 { 0x0090, "IDINTEN" },
180 { 0x0094, "DSCADDR" },
181 { 0x0098, "BUFADDR" },
182 { 0x0100, "CARDTHRCTL" },
183 { 0x0104, "BackEndPwr" },
/* Forward declarations for reset/power helpers defined later in this file. */
187 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
188 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
189 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
190 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
192 /* Print all registers of the current host. */
/*
 * Dump every register listed in dw_mci_regs[] for @host via printk.
 * Debug aid only; the table walk stops at the NULL-name sentinel entry.
 */
194 static int dw_mci_regs_printk(struct dw_mci *host)
196 struct sdmmc_reg *regs = dw_mci_regs;
198 while( regs->name != 0 ){
199 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
202 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
207 #if defined(CONFIG_DEBUG_FS)
/*
 * debugfs "req" show: print a consistent snapshot of the slot's current
 * mmc_request (command, data, stop command) under the host lock.
 *
 * Fix: the fourth response word must be resp[3]; it was printed as
 * resp[2] twice (copy/paste bug, also fixed in upstream dw_mmc).
 */
208 static int dw_mci_req_show(struct seq_file *s, void *v)
210 struct dw_mci_slot *slot = s->private;
211 struct mmc_request *mrq;
212 struct mmc_command *cmd;
213 struct mmc_command *stop;
214 struct mmc_data *data;
216 /* Make sure we get a consistent snapshot */
217 spin_lock_bh(&slot->host->lock);
227 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
228 cmd->opcode, cmd->arg, cmd->flags,
229 cmd->resp[0], cmd->resp[1], cmd->resp[2],
230 cmd->resp[3], cmd->error);
232 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
233 data->bytes_xfered, data->blocks,
234 data->blksz, data->flags, data->error);
237 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
238 stop->opcode, stop->arg, stop->flags,
239 stop->resp[0], stop->resp[1], stop->resp[2],
240 stop->resp[3], stop->error);
243 spin_unlock_bh(&slot->host->lock);
/* debugfs open hook binding dw_mci_req_show to the slot's "req" file. */
248 static int dw_mci_req_open(struct inode *inode, struct file *file)
250 return single_open(file, dw_mci_req_show, inode->i_private);
/* file_operations for the per-slot debugfs "req" entry (single_open style). */
253 static const struct file_operations dw_mci_req_fops = {
254 .owner = THIS_MODULE,
255 .open = dw_mci_req_open,
258 .release = single_release,
/*
 * debugfs "regs" show: print key controller registers.
 * NOTE(review): as written these seq_printf calls emit the SDMMC_*
 * register-offset constants, not register contents; upstream dw_mmc
 * reads them via mci_readl(host, ...) — verify which is intended here.
 */
261 static int dw_mci_regs_show(struct seq_file *s, void *v)
263 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
264 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
265 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
266 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
267 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
268 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open hook binding dw_mci_regs_show to the "regs" file. */
273 static int dw_mci_regs_open(struct inode *inode, struct file *file)
275 return single_open(file, dw_mci_regs_show, inode->i_private);
/* file_operations for the debugfs "regs" entry (single_open style). */
278 static const struct file_operations dw_mci_regs_fops = {
279 .owner = THIS_MODULE,
280 .open = dw_mci_regs_open,
283 .release = single_release,
/*
 * Create the per-slot debugfs entries ("regs", "req", "state",
 * "pending_events", "completed_events") under the mmc host's debugfs
 * root. Failures are reported with a single dev_err.
 */
286 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
288 struct mmc_host *mmc = slot->mmc;
289 struct dw_mci *host = slot->host;
293 root = mmc->debugfs_root;
297 node = debugfs_create_file("regs", S_IRUSR, root, host,
302 node = debugfs_create_file("req", S_IRUSR, root, slot,
307 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
311 node = debugfs_create_x32("pending_events", S_IRUSR, root,
312 (u32 *)&host->pending_events);
316 node = debugfs_create_x32("completed_events", S_IRUSR, root,
317 (u32 *)&host->completed_events);
324 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
326 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the controller's data timeout register to its maximum value. */
328 static void dw_mci_set_timeout(struct dw_mci *host)
330 /* timeout (maximum) */
331 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMD-register bit
 * encoding: stop/abort, wait-for-previous-data, response expected/long,
 * response CRC, and data transfer flags (stream mode, write direction).
 * Finishes with the SoC-specific prepare_command hook, which may patch
 * the computed flags.
 */
334 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
336 struct mmc_data *data;
337 struct dw_mci_slot *slot = mmc_priv(mmc);
338 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
340 cmd->error = -EINPROGRESS;
344 if (cmdr == MMC_STOP_TRANSMISSION)
345 cmdr |= SDMMC_CMD_STOP;
347 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
349 if (cmd->flags & MMC_RSP_PRESENT) {
350 /* We expect a response, so set this bit */
351 cmdr |= SDMMC_CMD_RESP_EXP;
352 if (cmd->flags & MMC_RSP_136)
353 cmdr |= SDMMC_CMD_RESP_LONG;
356 if (cmd->flags & MMC_RSP_CRC)
357 cmdr |= SDMMC_CMD_RESP_CRC;
361 cmdr |= SDMMC_CMD_DAT_EXP;
362 if (data->flags & MMC_DATA_STREAM)
363 cmdr |= SDMMC_CMD_STRM_MODE;
364 if (data->flags & MMC_DATA_WRITE)
365 cmdr |= SDMMC_CMD_DAT_WR;
368 if (drv_data && drv_data->prepare_command)
369 drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build the stop/abort command used to terminate a data transfer:
 * CMD12 (STOP_TRANSMISSION) for block read/write, or CMD52
 * (IO_RW_DIRECT write to CCCR_ABORT) for SDIO CMD53 transfers.
 * Returns the CMD-register encoding for the prepared stop command.
 *
 * Fix: use 1U for the bit-31 write flag — left-shifting signed 1 into
 * the sign bit is undefined behavior in C.
 */
375 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
377 struct mmc_command *stop;
383 stop = &host->stop_abort;
385 memset(stop, 0, sizeof(struct mmc_command));
387 if (cmdr == MMC_READ_SINGLE_BLOCK ||
388 cmdr == MMC_READ_MULTIPLE_BLOCK ||
389 cmdr == MMC_WRITE_BLOCK ||
390 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
391 stop->opcode = MMC_STOP_TRANSMISSION;
393 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
394 } else if (cmdr == SD_IO_RW_EXTENDED) {
395 stop->opcode = SD_IO_RW_DIRECT;
/* CMD52 write (bit 31) to CCCR_ABORT, preserving the function number
 * from the original CMD53 argument (bits 30:28). */
396 stop->arg |= (1U << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
397 ((cmd->arg >> 28) & 0x7);
398 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
403 cmdr = stop->opcode | SDMMC_CMD_STOP |
404 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Latch the command argument and fire the command by writing the CMD
 * register with SDMMC_CMD_START. CMD11 (SD_SWITCH_VOLTAGE) gets special
 * treatment: low-power clock gating is disabled first and the
 * VOLT_SWITCH flag is added so the controller handles the 1.8V switch
 * sequence.
 */
409 static void dw_mci_start_command(struct dw_mci *host,
410 struct mmc_command *cmd, u32 cmd_flags)
412 struct dw_mci_slot *slot = host->slot[0];
413 /* Temporarily fixed to slot[0]: host->num_slots is always 1 here. */
415 host->pre_cmd = host->cmd;
418 "start command: ARGR=0x%08x CMDR=0x%08x\n",
419 cmd->arg, cmd_flags);
421 if(SD_SWITCH_VOLTAGE == cmd->opcode){
422 /*confirm non-low-power mode*/
423 mci_writel(host, CMDARG, 0);
424 dw_mci_disable_low_power(slot);
426 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
427 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
429 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
432 mci_writel(host, CMDARG, cmd->arg);
435 /* Some SoCs (e.g. RK3188) require the hold register to be used. */
436 if(host->mmc->hold_reg_flag)
437 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
439 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the request's stop command using the pre-computed stop_cmdr flags. */
443 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
445 dw_mci_start_command(host, data->stop, host->stop_cmdr);
448 /* DMA interface functions */
/*
 * Abort an in-flight DMA transfer and mark the transfer phase complete
 * so the state machine can proceed. On RK3036/RK312x the edma stop is
 * skipped (terminating edma there may trigger an unwanted flush op).
 */
449 static void dw_mci_stop_dma(struct dw_mci *host)
451 if (host->using_dma) {
452 /* Fixme: No need to terminate edma, may cause flush op */
453 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
454 host->dma_ops->stop(host);
455 host->dma_ops->cleanup(host);
458 /* Data transfer was stopped by the interrupt handler */
459 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Map the mmc_data direction flag to the dma_data_direction enum. */
462 static int dw_mci_get_dma_dir(struct mmc_data *data)
464 if (data->flags & MMC_DATA_WRITE)
465 return DMA_TO_DEVICE;
467 return DMA_FROM_DEVICE;
470 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the scatterlist after an IDMAC transfer — but only if the
 * request was not pre-mapped via pre_req (host_cookie set), in which
 * case post_req owns the unmap.
 */
471 static void dw_mci_dma_cleanup(struct dw_mci *host)
473 struct mmc_data *data = host->data;
476 if (!data->host_cookie)
477 dma_unmap_sg(host->dev,
480 dw_mci_get_dma_dir(data));
/* Soft-reset the internal DMA controller via the BMOD SWRESET bit. */
483 static void dw_mci_idmac_reset(struct dw_mci *host)
485 u32 bmod = mci_readl(host, BMOD);
486 /* Software reset of DMA */
487 bmod |= SDMMC_IDMAC_SWRESET;
488 mci_writel(host, BMOD, bmod);
/*
 * Halt the internal DMA controller: deselect/reset the IDMAC interface
 * in CTRL, then disable and soft-reset the descriptor engine via BMOD.
 */
491 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
495 /* Disable and reset the IDMAC interface */
496 temp = mci_readl(host, CTRL);
497 temp &= ~SDMMC_CTRL_USE_IDMAC;
498 temp |= SDMMC_CTRL_DMA_RESET;
499 mci_writel(host, CTRL, temp);
501 /* Stop the IDMAC running */
502 temp = mci_readl(host, BMOD);
503 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
504 temp |= SDMMC_IDMAC_SWRESET;
505 mci_writel(host, BMOD, temp);
/*
 * IDMAC completion callback: clean up the mapping, flag the transfer
 * complete and kick the state-machine tasklet. If the card was removed,
 * host->data will be NULL and the stop/NBUSY handling is skipped.
 */
508 static void dw_mci_idmac_complete_dma(void *arg)
510 struct dw_mci *host = arg;
511 struct mmc_data *data = host->data;
513 dev_vdbg(host->dev, "DMA complete\n");
516 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
517 host->mrq->cmd->opcode,host->mrq->cmd->arg,
518 data->blocks,data->blksz,mmc_hostname(host->mmc));
521 host->dma_ops->cleanup(host);
524 * If the card was removed, data will be NULL. No point in trying to
525 * send the stop command or waiting for NBUSY in this case.
528 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
529 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the DMA-mapped scatterlist:
 * each entry gets OWN|DIC|CH, its buffer size and physical address;
 * the first descriptor is tagged FD and the last LD (with chaining and
 * interrupt-suppression cleared so completion fires once at the end).
 */
533 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
537 struct idmac_desc *desc = host->sg_cpu;
539 for (i = 0; i < sg_len; i++, desc++) {
540 unsigned int length = sg_dma_len(&data->sg[i]);
541 u32 mem_addr = sg_dma_address(&data->sg[i]);
543 /* Set the OWN bit and disable interrupts for this descriptor */
544 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
547 IDMAC_SET_BUFFER1_SIZE(desc, length);
549 /* Physical address to DMA to/from */
550 desc->des2 = mem_addr;
553 /* Set first descriptor */
555 desc->des0 |= IDMAC_DES0_FD;
557 /* Set last descriptor */
/* NOTE(review): byte-offset arithmetic below assumes host->sg_cpu is
 * void* (as in upstream dw_mmc) so the sizeof scaling is correct —
 * confirm against the struct dw_mci definition. */
558 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
559 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
560 desc->des0 |= IDMAC_DES0_LD;
/*
 * Start an IDMAC transfer: build the descriptor ring for the mapped
 * scatterlist, select the IDMAC in CTRL, enable the engine in BMOD and
 * write the poll-demand register to start descriptor fetching.
 */
565 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
569 dw_mci_translate_sglist(host, host->data, sg_len);
571 /* Select IDMAC interface */
572 temp = mci_readl(host, CTRL);
573 temp |= SDMMC_CTRL_USE_IDMAC;
574 mci_writel(host, CTRL, temp);
578 /* Enable the IDMAC */
579 temp = mci_readl(host, BMOD);
580 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
581 mci_writel(host, BMOD, temp);
583 /* Start it running */
584 mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: forward-link one page worth of descriptors into
 * a ring (last entry points back to the head and carries the ER bit),
 * reset the engine, unmask only NI/RI/TI interrupts and program the
 * descriptor base address.
 */
587 static int dw_mci_idmac_init(struct dw_mci *host)
589 struct idmac_desc *p;
592 /* Number of descriptors in the ring buffer */
593 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
595 /* Forward link the descriptor list */
596 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) {
597 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
600 /* Set the last descriptor as the end-of-ring descriptor */
601 p->des3 = host->sg_dma;
602 p->des0 = IDMAC_DES0_ER;
604 dw_mci_idmac_reset(host);
606 /* Mask out interrupts - get Tx & Rx complete only */
607 mci_writel(host, IDSTS, IDMAC_INT_CLR);
608 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
611 /* Set the descriptor base address */
612 mci_writel(host, DBADDR, host->sg_dma);
/* DMA ops vtable for the internal DMA controller (IDMAC) backend. */
616 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
617 .init = dw_mci_idmac_init,
618 .start = dw_mci_idmac_start_dma,
619 .stop = dw_mci_idmac_stop_dma,
620 .complete = dw_mci_idmac_complete_dma,
621 .cleanup = dw_mci_dma_cleanup,
/*
 * Unmap the scatterlist after an external-DMA transfer, unless it was
 * pre-mapped via pre_req (host_cookie set) — then post_req unmaps it.
 */
625 static void dw_mci_edma_cleanup(struct dw_mci *host)
627 struct mmc_data *data = host->data;
630 if (!data->host_cookie)
631 dma_unmap_sg(host->dev,
632 data->sg, data->sg_len,
633 dw_mci_get_dma_dir(data));
/* Terminate all outstanding transfers on the external dmaengine channel. */
636 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
638 dmaengine_terminate_all(host->dms->ch);
/*
 * External-DMA completion callback: for reads, sync the scatterlist
 * back for CPU access (cache invalidate), then clean up and kick the
 * state-machine tasklet. data is NULL if the card was removed.
 */
641 static void dw_mci_edmac_complete_dma(void *arg)
643 struct dw_mci *host = arg;
644 struct mmc_data *data = host->data;
646 dev_vdbg(host->dev, "DMA complete\n");
649 if(data->flags & MMC_DATA_READ)
650 /* Invalidate cache after read */
651 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
652 data->sg_len, DMA_FROM_DEVICE);
654 host->dma_ops->cleanup(host);
657 * If the card was removed, data will be NULL. No point in trying to
658 * send the stop command or waiting for NBUSY in this case.
661 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
662 tasklet_schedule(&host->tasklet);
/*
 * Start a transfer on the external dmaengine channel: derive the slave
 * burst size from the controller FIFOTH MSIZE field (clamped to the
 * edmac burst limit, with FIFOTH rewritten to match), configure the
 * channel for the transfer direction, prepare and submit the
 * scatterlist descriptor with dw_mci_edmac_complete_dma as callback,
 * and issue it. Writes additionally flush the CPU caches first via
 * dma_sync_sg_for_device.
 */
666 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
668 struct dma_slave_config slave_config;
669 struct dma_async_tx_descriptor *desc = NULL;
670 struct scatterlist *sgl = host->data->sg;
671 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
672 u32 sg_elems = host->data->sg_len;
673 u32 fifoth_val, mburst;
675 u32 idx, rx_wmark, tx_wmark;
678 /* Set external dma config: burst size, burst width*/
679 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
680 slave_config.src_addr = slave_config.dst_addr;
681 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
682 slave_config.src_addr_width = slave_config.dst_addr_width;
684 /* Match FIFO dma burst MSIZE with external dma config*/
685 fifoth_val = mci_readl(host, FIFOTH);
686 mburst = mszs[(fifoth_val >> 28) & 0x7];
688 /* edmac limit burst to 16, but work around for rk3036 to 8 */
689 if (unlikely(cpu_is_rk3036()))
694 if (mburst > burst_limit) {
695 mburst = burst_limit;
696 idx = (ilog2(mburst) > 0) ? (ilog2(mburst) - 1) : 0;
698 rx_wmark = mszs[idx] - 1;
699 tx_wmark = (host->fifo_depth) / 2;
700 fifoth_val = SDMMC_SET_FIFOTH(idx, rx_wmark, tx_wmark);
702 mci_writel(host, FIFOTH, fifoth_val);
705 slave_config.dst_maxburst = mburst;
706 slave_config.src_maxburst = slave_config.dst_maxburst;
708 if(host->data->flags & MMC_DATA_WRITE){
709 slave_config.direction = DMA_MEM_TO_DEV;
710 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
712 dev_err(host->dev, "error in dw_mci edma configuration.\n");
716 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
717 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
719 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
722 /* Set dw_mci_edmac_complete_dma as callback */
723 desc->callback = dw_mci_edmac_complete_dma;
724 desc->callback_param = (void *)host;
725 dmaengine_submit(desc);
727 /* Flush cache before write */
728 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
729 sg_elems, DMA_TO_DEVICE);
730 dma_async_issue_pending(host->dms->ch);
/* Read path: device-to-memory with the same channel configuration. */
733 slave_config.direction = DMA_DEV_TO_MEM;
734 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
736 dev_err(host->dev, "error in dw_mci edma configuration.\n");
739 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
740 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
742 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
745 /* set dw_mci_edmac_complete_dma as callback */
746 desc->callback = dw_mci_edmac_complete_dma;
747 desc->callback_param = (void *)host;
748 dmaengine_submit(desc);
749 dma_async_issue_pending(host->dms->ch);
/*
 * Allocate the dw_mci_dma_slave holder and request the external
 * dmaengine channel named "dw_mci" (the channel itself is chosen in
 * the device tree).
 *
 * Fix: the failure path printed host->dms->ch->chan_id right after
 * establishing that host->dms->ch is NULL — a guaranteed NULL pointer
 * dereference. Report the failure without dereferencing the channel.
 */
754 static int dw_mci_edmac_init(struct dw_mci *host)
756 /* Request external dma channel, SHOULD decide chn in dts */
757 host->dms = (struct dw_mci_dma_slave *)kmalloc
758 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
759 if (NULL == host->dms) {
760 dev_err(host->dev, "No enough memory to alloc dms.\n");
764 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
765 if (!host->dms->ch) {
766 dev_err(host->dev, "Failed to get external DMA channel\n");
774 if (NULL != host->dms) {
/*
 * Release the external dmaengine channel (and clear the pointer so a
 * double exit is harmless) before tearing down the dms holder.
 */
782 static void dw_mci_edmac_exit(struct dw_mci *host)
784 if (NULL != host->dms) {
785 if (NULL != host->dms->ch) {
786 dma_release_channel(host->dms->ch);
787 host->dms->ch = NULL;
/* DMA ops vtable for the external dmaengine (edmac) backend. */
794 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
795 .init = dw_mci_edmac_init,
796 .exit = dw_mci_edmac_exit,
797 .start = dw_mci_edmac_start_dma,
798 .stop = dw_mci_edmac_stop_dma,
799 .complete = dw_mci_edmac_complete_dma,
800 .cleanup = dw_mci_edma_cleanup,
802 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * DMA-map the request's scatterlist (or reuse an existing pre_req
 * mapping via host_cookie). Transfers that are too short or not
 * word-aligned are rejected so they fall back to PIO. When called from
 * pre_req (next != 0) the sg_len is cached in data->host_cookie.
 */
804 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
805 struct mmc_data *data,
808 struct scatterlist *sg;
809 unsigned int i, sg_len;
811 if (!next && data->host_cookie)
812 return data->host_cookie;
815 * We don't do DMA on "complex" transfers, i.e. with
816 * non-word-aligned buffers or lengths. Also, we don't bother
817 * with all the DMA setup overhead for short transfers.
819 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
825 for_each_sg(data->sg, sg, data->sg_len, i) {
826 if (sg->offset & 3 || sg->length & 3)
830 sg_len = dma_map_sg(host->dev,
833 dw_mci_get_dma_dir(data));
838 data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: pre-map the next request's data for DMA so the
 * mapping overlaps with the previous transfer. A stale host_cookie is
 * cleared first; on mapping failure the cookie is reset to 0 so the
 * request falls back to the normal path.
 */
843 static void dw_mci_pre_req(struct mmc_host *mmc,
844 struct mmc_request *mrq,
847 struct dw_mci_slot *slot = mmc_priv(mmc);
848 struct mmc_data *data = mrq->data;
850 if (!slot->host->use_dma || !data)
853 if (data->host_cookie) {
854 data->host_cookie = 0;
858 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
859 data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: undo a pre_req mapping — unmap the
 * scatterlist if it was pre-mapped (host_cookie set) and clear the
 * cookie.
 */
862 static void dw_mci_post_req(struct mmc_host *mmc,
863 struct mmc_request *mrq,
866 struct dw_mci_slot *slot = mmc_priv(mmc);
867 struct mmc_data *data = mrq->data;
869 if (!slot->host->use_dma || !data)
872 if (data->host_cookie)
873 dma_unmap_sg(slot->host->dev,
876 dw_mci_get_dma_dir(data));
877 data->host_cookie = 0;
/*
 * Pick the largest burst MSIZE (from mszs[]) that evenly divides both
 * the block depth and the TX watermark inverse, then program FIFOTH
 * with the resulting MSIZE and RX/TX watermarks. Falls back to the
 * initial msize=0/rx_wmark=1 values when no candidate fits (e.g. blksz
 * not a multiple of the FIFO width). IDMAC builds only.
 */
880 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
882 #ifdef CONFIG_MMC_DW_IDMAC
883 unsigned int blksz = data->blksz;
884 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
885 u32 fifo_width = 1 << host->data_shift;
886 u32 blksz_depth = blksz / fifo_width, fifoth_val;
887 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
888 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
890 tx_wmark = (host->fifo_depth) / 2;
891 tx_wmark_invers = host->fifo_depth - tx_wmark;
895 * if blksz is not a multiple of the FIFO width
897 if (blksz % fifo_width) {
904 if (!((blksz_depth % mszs[idx]) ||
905 (tx_wmark_invers % mszs[idx]))) {
907 rx_wmark = mszs[idx] - 1;
912 * If idx is '0', it won't be tried
913 * Thus, initial values are used
916 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
917 mci_writel(host, FIFOTH, fifoth_val);
/*
 * Configure the card read threshold (CDTHRCTL): enabled with
 * thld_size = blksz for HS200/SDR104 reads that fit the FIFO,
 * disabled otherwise. Only valid for read transfers (WARN otherwise).
 */
922 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
924 unsigned int blksz = data->blksz;
925 u32 blksz_depth, fifo_depth;
928 WARN_ON(!(data->flags & MMC_DATA_READ));
930 if (host->timing != MMC_TIMING_MMC_HS200 &&
931 host->timing != MMC_TIMING_UHS_SDR104)
934 blksz_depth = blksz / (1 << host->data_shift);
935 fifo_depth = host->fifo_depth;
937 if (blksz_depth > fifo_depth)
941 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
942 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
943 * Currently just choose blksz.
946 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
950 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to hand the data transfer to the DMA backend. Maps the
 * scatterlist, updates FIFOTH when the block size changed, resets the
 * DMA block, enables the DMA interface, masks the PIO RX/TX IRQs and
 * starts the backend. A non-zero return means the caller must fall
 * back to PIO. RK3036/RK312x skip the edma stop (flush-op workaround).
 */
953 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
961 /* If we don't have a channel, we can't do DMA */
965 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
967 /* Fixme: No need terminate edma, may cause flush op */
968 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
969 host->dma_ops->stop(host);
976 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
977 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
981 * Decide the MSIZE and RX/TX Watermark.
982 * If current block size is same with previous size,
983 * no need to update fifoth.
985 if (host->prev_blksz != data->blksz)
986 dw_mci_adjust_fifoth(host, data);
989 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
991 /* Enable the DMA interface */
992 temp = mci_readl(host, CTRL);
993 temp |= SDMMC_CTRL_DMA_ENABLE;
994 mci_writel(host, CTRL, temp);
996 /* Disable RX/TX IRQs, let DMA handle it */
997 spin_lock_irqsave(&host->slock, flags);
998 temp = mci_readl(host, INTMASK);
999 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
1000 mci_writel(host, INTMASK, temp);
1001 spin_unlock_irqrestore(&host->slock, flags);
1003 host->dma_ops->start(host, sg_len);
/*
 * Submit a data transfer: record the direction, set the read threshold
 * for reads, then attempt DMA. On DMA refusal, fall back to PIO —
 * start the sg_miter, enable RX/TX IRQs, disable the DMA interface and
 * restore the initial FIFOTH (prev_blksz is zeroed so the next DMA
 * transfer re-tunes FIFOTH); otherwise remember the block size.
 */
1008 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
1013 data->error = -EINPROGRESS;
1015 //WARN_ON(host->data);
1020 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
1022 if (data->flags & MMC_DATA_READ) {
1023 host->dir_status = DW_MCI_RECV_STATUS;
1024 dw_mci_ctrl_rd_thld(host, data);
1026 host->dir_status = DW_MCI_SEND_STATUS;
1029 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
1030 data->blocks, data->blksz, mmc_hostname(host->mmc));
1032 if (dw_mci_submit_data_dma(host, data)) {
1033 int flags = SG_MITER_ATOMIC;
1034 if (host->data->flags & MMC_DATA_READ)
1035 flags |= SG_MITER_TO_SG;
1037 flags |= SG_MITER_FROM_SG;
1039 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1040 host->sg = data->sg;
1041 host->part_buf_start = 0;
1042 host->part_buf_count = 0;
1044 spin_lock_irqsave(&host->slock, flag);
1045 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
1046 temp = mci_readl(host, INTMASK);
1047 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
1048 mci_writel(host, INTMASK, temp);
1049 spin_unlock_irqrestore(&host->slock, flag);
1051 temp = mci_readl(host, CTRL);
1052 temp &= ~SDMMC_CTRL_DMA_ENABLE;
1053 mci_writel(host, CTRL, temp);
1056 * Use the initial fifoth_val for PIO mode.
1057 * If next issued data may be transfered by DMA mode,
1058 * prev_blksz should be invalidated.
1060 mci_writel(host, FIFOTH, host->fifoth_val);
1061 host->prev_blksz = 0;
1064 * Keep the current block size.
1065 * It will be used to decide whether to update
1066 * fifoth register next time.
1068 host->prev_blksz = data->blksz;
/*
 * Synchronously issue an internal controller command (e.g. clock
 * update): optionally wait for the card/controller to go unbusy, write
 * CMDARG/CMD with the START bit, then poll until the controller clears
 * START (shorter timeout for UPD_CLK commands). Logs on both the
 * unbusy-wait and command timeouts.
 */
1072 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1074 struct dw_mci *host = slot->host;
1075 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1076 unsigned int cmd_status = 0;
1077 #ifdef SDMMC_WAIT_FOR_UNBUSY
1079 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1081 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1083 ret = time_before(jiffies, timeout);
1084 cmd_status = mci_readl(host, STATUS);
1085 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1089 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1090 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1093 mci_writel(host, CMDARG, arg);
1095 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
1096 if(cmd & SDMMC_CMD_UPD_CLK)
1097 timeout = jiffies + msecs_to_jiffies(50);
1099 timeout = jiffies + msecs_to_jiffies(500);
1100 while (time_before(jiffies, timeout)) {
1101 cmd_status = mci_readl(host, CMD);
1102 if (!(cmd_status & SDMMC_CMD_START))
1105 dev_err(&slot->mmc->class_dev,
1106 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1107 cmd, arg, cmd_status);
/*
 * Program the card clock and bus width for @slot. clock == 0 gates the
 * clock; otherwise, when the requested clock or force_clkinit changed,
 * the divider is computed from bus_hz (DIV_ROUND_UP so the card is
 * never over-clocked), CLKDIV/CLKENA are reprogrammed via UPD_CLK
 * commands, and low-power clock gating is enabled unless SDIO IRQs are
 * in use. Contains Rockchip workarounds that re-rate clk_mmc directly
 * for the init clock and for div > 1 / DDR cases (see the inline BUG
 * note below). Finally records current_speed/__clk_old and writes the
 * slot's bus width into CTYPE.
 */
1110 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1112 struct dw_mci *host = slot->host;
1113 unsigned int tempck,clock = slot->clock;
1118 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1119 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1122 mci_writel(host, CLKENA, 0);
1123 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1124 if(host->svi_flags == 0)
1125 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1127 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1129 } else if (clock != host->current_speed || force_clkinit) {
1130 div = host->bus_hz / clock;
1131 if (host->bus_hz % clock && host->bus_hz > clock)
1133 * move the + 1 after the divide to prevent
1134 * over-clocking the card.
1138 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1140 if ((clock << div) != slot->__clk_old || force_clkinit) {
1141 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1142 dev_info(&slot->mmc->class_dev,
1143 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1144 slot->id, host->bus_hz, clock,
1147 host->set_speed = tempck;
1148 host->set_div = div;
1152 mci_writel(host, CLKENA, 0);
1153 mci_writel(host, CLKSRC, 0);
1157 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1159 if(clock <= 400*1000){
1160 MMC_DBG_BOOT_FUNC(host->mmc,
1161 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1162 clock * 2, mmc_hostname(host->mmc));
1163 /* clk_mmc will change parents to 24MHz xtal*/
1164 clk_set_rate(host->clk_mmc, clock * 2);
1167 host->set_div = div;
1171 MMC_DBG_BOOT_FUNC(host->mmc,
1172 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1173 mmc_hostname(host->mmc));
1176 MMC_DBG_ERR_FUNC(host->mmc,
1177 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1178 mmc_hostname(host->mmc));
1180 host->set_div = div;
1181 host->bus_hz = host->set_speed * 2;
1182 MMC_DBG_BOOT_FUNC(host->mmc,
1183 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1184 div, host->bus_hz, mmc_hostname(host->mmc));
1186 /* BUG may be here, come on, Linux BSP engineer looks!
1187 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1188 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1189 some oops happened like that:
1190 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1191 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1192 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1193 mmc0: new high speed DDR MMC card at address 0001
1194 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1196 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1197 mmcblk0: retrying using single block read
1198 mmcblk0: error -110 sending status command, retrying
1200 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1203 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1204 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1207 host->set_div = div;
1208 host->bus_hz = host->set_speed * 2;
1209 MMC_DBG_BOOT_FUNC(host->mmc,
1210 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1211 div, host->bus_hz, mmc_hostname(host->mmc));
1214 if (host->verid < DW_MMC_240A)
1215 clk_set_rate(host->clk_mmc,(host->bus_hz));
1217 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1223 /* set clock to desired speed */
1224 mci_writel(host, CLKDIV, div);
1228 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1230 /* enable clock; only low power if no SDIO */
1231 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1233 if (host->verid < DW_MMC_240A)
1234 sdio_int = SDMMC_INT_SDIO(slot->id);
1236 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1238 if (!(mci_readl(host, INTMASK) & sdio_int))
1239 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1240 mci_writel(host, CLKENA, clk_en_a);
1244 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1245 /* keep the clock with reflecting clock dividor */
1246 slot->__clk_old = clock << div;
1249 host->current_speed = clock;
1251 if(slot->ctype != slot->pre_ctype)
1252 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1254 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1255 mmc_hostname(host->mmc));
1256 slot->pre_ctype = slot->ctype;
1258 /* Set the current slot bus width */
1259 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1262 extern struct mmc_card *this_card;
/*
 * Poll the STATUS register until the card/controller is no longer busy
 * or the per-card-type timeout expires. eMMC erase commands get an
 * extended timeout computed from the card's EXT_CSD erase parameters
 * (scaled further for secure erase).
 *
 * Fix: the secure-erase test was `(arg & (0x1 << 31)) == 1`, which is
 * always false — the mask result is 0 or 0x80000000, never 1 — so the
 * secure-erase multiplier could never be applied. Test the bit
 * directly (and use an unsigned constant: shifting signed 1 into bit
 * 31 is undefined behavior).
 */
1263 static void dw_mci_wait_unbusy(struct dw_mci *host)
1266 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1267 unsigned long time_loop;
1268 unsigned int status;
1271 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1273 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1274 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1275 /* Special care for (secure)erase timeout calculation */
1277 if(host->cmd->arg & (0x1u << 31)) /* secure erase */
1280 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1281 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1282 300000 * (this_card->ext_csd.sec_erase_mult)) :
1283 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1287 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1288 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1289 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1290 timeout = SDMMC_DATA_TIMEOUT_SD;
1293 time_loop = jiffies + msecs_to_jiffies(timeout);
1295 status = mci_readl(host, STATUS);
1296 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1298 } while (time_before(jiffies, time_loop));
1303 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1306 * 0--status is busy.
1307 * 1--status is unbusy.
/*
 * .card_busy callback used during signal-voltage switching
 * (CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE).  Alternates host->svi_flags
 * between 1 ("busy") and 0 ("not busy") on successive calls; returns
 * the new flag value.  Comment above (elided lines) documents:
 * 0 = status is busy, 1 = status is unbusy.
 */
1309 int dw_mci_card_busy(struct mmc_host *mmc)
1311 struct dw_mci_slot *slot = mmc_priv(mmc);
1312 struct dw_mci *host = slot->host;
1314 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1315 host->svi_flags, mmc_hostname(host->mmc));
/* First call of a switch sequence: report busy (1). */
1318 if(host->svi_flags == 0){
1320 host->svi_flags = 1;
1321 return host->svi_flags;
/* Subsequent call: report not busy (0). */
1324 host->svi_flags = 0;
1325 return host->svi_flags;
/*
 * Program the controller for one mmc_command of a request: select the
 * slot, wait for the card to be unbusy, clear per-request event state,
 * set up byte/block counts for data transfers, submit data, and start
 * the command.  Also pre-computes the stop-command register value when
 * the request carries a stop command.  Caller context: with host->lock
 * held (started via dw_mci_start_request).
 */
1331 static void __dw_mci_start_request(struct dw_mci *host,
1332 struct dw_mci_slot *slot,
1333 struct mmc_command *cmd)
1335 struct mmc_request *mrq;
1336 struct mmc_data *data;
1340 if (host->pdata->select_slot)
1341 host->pdata->select_slot(slot->id);
1343 host->cur_slot = slot;
/* Make sure the previous operation fully drained before starting. */
1346 dw_mci_wait_unbusy(host);
1348 host->pending_events = 0;
1349 host->completed_events = 0;
1350 host->data_status = 0;
/* Data transfer: program timeout, total byte count, and block size. */
1354 dw_mci_set_timeout(host);
1355 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1356 mci_writel(host, BLKSIZ, data->blksz);
1359 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1361 /* this is the first command, send the initialization clock */
1362 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1363 cmdflags |= SDMMC_CMD_INIT;
1366 dw_mci_submit_data(host, data);
1370 dw_mci_start_command(host, cmd, cmdflags);
/* Cache the stop command encoding for later use by the state machine. */
1373 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * Kick off the slot's queued request.  If the request has a set-block-
 * count (sbc, CMD23) command it is issued first; otherwise the main
 * command is started directly.
 */
1376 static void dw_mci_start_request(struct dw_mci *host,
1377 struct dw_mci_slot *slot)
1379 struct mmc_request *mrq = slot->mrq;
1380 struct mmc_command *cmd;
1382 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1383 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
/* CMD23 (if present) goes out before the actual data command. */
1385 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1386 __dw_mci_start_request(host, slot, cmd);
1389 /* must be called with host->lock held */
/*
 * Start the request immediately when the host state machine is idle;
 * otherwise append the slot to the host's pending queue so it is picked
 * up by dw_mci_request_end() when the current request finishes.
 */
1390 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1391 struct mmc_request *mrq)
1393 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1398 if (host->state == STATE_IDLE) {
1399 host->state = STATE_SENDING_CMD;
1400 dw_mci_start_request(host, slot);
1402 list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops .request entry point.  Fails the request with -ENOMEDIUM
 * when no card is present, otherwise queues it.  Card-presence check and
 * queueing are done under host->lock so a removal cannot race between
 * the two (see comment below).
 */
1406 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1408 struct dw_mci_slot *slot = mmc_priv(mmc);
1409 struct dw_mci *host = slot->host;
1414 * The check for card presence and queueing of the request must be
1415 * atomic, otherwise the card could be removed in between and the
1416 * request wouldn't fail until another card was inserted.
1418 spin_lock_bh(&host->lock);
1420 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1421 spin_unlock_bh(&host->lock);
1422 mrq->cmd->error = -ENOMEDIUM;
1423 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1424 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
/* Complete immediately outside the lock; -ENOMEDIUM already set. */
1426 mmc_request_done(mmc, mrq);
1430 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1431 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1433 dw_mci_queue_request(host, slot, mrq);
1435 spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops .set_ios: apply bus width, timing, clock and power mode.
 * When SDMMC_WAIT_FOR_UNBUSY is defined it first polls the controller
 * until not-busy (with a longer timeout while a signal-voltage switch
 * is in progress, host->svi_flags == 1).
 * NOTE: several original source lines are elided in this view.
 */
1438 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1440 struct dw_mci_slot *slot = mmc_priv(mmc);
1441 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1442 struct dw_mci *host = slot->host;
1444 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1447 #ifdef SDMMC_WAIT_FOR_UNBUSY
1448 unsigned long time_loop;
/* Voltage switch in flight gets the (longer) SD data timeout. */
1451 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1452 if(host->svi_flags == 1)
1453 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1455 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1457 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1460 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1461 printk("%d..%s: no card. [%s]\n", \
1462 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until both busy bits clear or the deadline passes. */
1467 ret = time_before(jiffies, time_loop);
1468 regs = mci_readl(slot->host, STATUS);
1469 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1475 printk("slot->flags = %lu ", slot->flags);
1476 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1477 if(host->svi_flags != 1)
1480 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1481 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Translate requested bus width into the controller CTYPE encoding. */
1485 switch (ios->bus_width) {
1486 case MMC_BUS_WIDTH_4:
1487 slot->ctype = SDMMC_CTYPE_4BIT;
1489 case MMC_BUS_WIDTH_8:
1490 slot->ctype = SDMMC_CTYPE_8BIT;
1493 /* set default 1 bit mode */
1494 slot->ctype = SDMMC_CTYPE_1BIT;
1495 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR50 timing: set/clear the per-slot DDR bit in UHS_REG (bits 16+). */
1498 regs = mci_readl(slot->host, UHS_REG);
1501 if (ios->timing == MMC_TIMING_UHS_DDR50)
1502 regs |= ((0x1 << slot->id) << 16);
1504 regs &= ~((0x1 << slot->id) << 16);
1506 mci_writel(slot->host, UHS_REG, regs);
1507 slot->host->timing = ios->timing;
1510 * Use mirror of ios->clock to prevent race with mmc
1511 * core ios update when finding the minimum.
1513 slot->clock = ios->clock;
/* SoC-specific set_ios hook (clock phase etc.), then bus setup. */
1515 if (drv_data && drv_data->set_ios)
1516 drv_data->set_ios(slot->host, ios);
1518 /* Slot specific timing and width adjustment */
1519 dw_mci_setup_bus(slot, false);
1523 switch (ios->power_mode) {
/* Power up: flag that the next command needs the 80-cycle init clock. */
1525 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1527 if (slot->host->pdata->setpower)
1528 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1529 regs = mci_readl(slot->host, PWREN);
1530 regs |= (1 << slot->id);
1531 mci_writel(slot->host, PWREN, regs);
1534 /* Power down slot */
1535 if(slot->host->pdata->setpower)
1536 slot->host->pdata->setpower(slot->id, 0);
1537 regs = mci_readl(slot->host, PWREN);
1538 regs &= ~(1 << slot->id);
1539 mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops .get_ro: report write-protect state.  Priority order:
 * slot quirk forces read-write, then platform get_ro() callback, then
 * the write-protect GPIO, and finally the controller WRTPRT register.
 */
1546 static int dw_mci_get_ro(struct mmc_host *mmc)
1549 struct dw_mci_slot *slot = mmc_priv(mmc);
1550 struct dw_mci_board *brd = slot->host->pdata;
1552 /* Use platform get_ro function, else try on board write protect */
1553 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1555 else if(brd->get_ro)
1556 read_only = brd->get_ro(slot->id);
1557 else if(gpio_is_valid(slot->wp_gpio))
1558 read_only = gpio_get_value(slot->wp_gpio);
/* Fallback: per-slot bit in the controller's WRTPRT register. */
1561 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1563 dev_dbg(&mmc->class_dev, "card is %s\n",
1564 read_only ? "read-only" : "read-write");
/*
 * Force the virtual presence state of an SDIO function (used e.g. by
 * the WiFi rfkill path): set/clear DW_MMC_CARD_PRESENT, gate the MMC
 * clocks accordingly, and trigger a (debounced) rescan.  Only valid for
 * hosts restricted to SDIO cards.
 */
1569 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1571 struct dw_mci_slot *slot = mmc_priv(mmc);
1572 struct dw_mci *host = slot->host;
1573 /*struct dw_mci_board *brd = slot->host->pdata;*/
1575 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1578 spin_lock_bh(&host->lock);
1581 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1583 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1585 spin_unlock_bh(&host->lock);
/* Card "inserted": make sure both AHB and card clocks are running. */
1587 if (test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1588 if (__clk_is_enabled(host->hclk_mmc) == false)
1589 clk_prepare_enable(host->hclk_mmc);
1590 if (__clk_is_enabled(host->clk_mmc) == false)
1591 clk_prepare_enable(host->clk_mmc);
/* Card "removed": drop clocks in reverse order (card clock first). */
1593 if (__clk_is_enabled(host->clk_mmc) == true)
1594 clk_disable_unprepare(slot->host->clk_mmc);
1595 if (__clk_is_enabled(host->hclk_mmc) == true)
1596 clk_disable_unprepare(slot->host->hclk_mmc);
/* 20 ms debounce before the core rescans the bus. */
1599 mmc_detect_change(slot->mmc, 20);
/*
 * mmc_host_ops .get_cd: report card presence.  On rk3036/rk3126 SD
 * hosts the card-detect GPIO doubles as a JTAG mux control: the GRF
 * "force_jtag" bit is toggled depending on whether a card is really in
 * the slot, and the CD IRQ trigger polarity is flipped to catch the
 * opposite edge.  Other hosts fall back to platform callback, CD GPIO,
 * or the controller CDETECT register.
 * NOTE: several original source lines are elided in this view.
 */
1605 static int dw_mci_get_cd(struct mmc_host *mmc)
1608 struct dw_mci_slot *slot = mmc_priv(mmc);
1609 struct dw_mci_board *brd = slot->host->pdata;
1610 struct dw_mci *host = slot->host;
1611 int gpio_cd = mmc_gpio_get_cd(mmc);
1612 int force_jtag_bit, force_jtag_reg;
1616 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
1617 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1618 gpio_cd = slot->cd_gpio;
1619 irq = gpio_to_irq(gpio_cd);
1620 if (gpio_is_valid(gpio_cd)) {
1621 gpio_val = gpio_get_value(gpio_cd);
1622 if (soc_is_rk3036()) {
1623 force_jtag_bit = 11;
1624 force_jtag_reg = RK312X_GRF_SOC_CON0;
/*
 * NOTE(review): the rk3126 branch visibly sets only force_jtag_reg;
 * the force_jtag_bit assignment is not visible here — confirm it is
 * set in the full source before relying on this path.
 */
1625 } else if (soc_is_rk3126() || soc_is_rk3126b()) {
1626 force_jtag_reg = RK312X_GRF_SOC_CON0;
/* Value stable across the two reads: treat it as the debounced level. */
1630 if (gpio_val == gpio_get_value(gpio_cd)) {
1631 gpio_cd = (gpio_val == 0 ? 1 : 0);
1633 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1634 /* Enable force_jtag without card in slot, ONLY for NCD-package */
1635 grf_writel((0x1 << (force_jtag_bit + 16)) | (1 << force_jtag_bit),
1638 dw_mci_ctrl_all_reset(host);
1640 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
1641 /* Really card detected: SHOULD disable force_jtag */
1642 grf_writel((0x1 << (force_jtag_bit + 16)) | (0 << force_jtag_bit),
/* GPIO bounced between reads: keep the last stable detect state. */
1647 gpio_val = gpio_get_value(gpio_cd);
1649 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
1650 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1651 return slot->last_detect_state;
1654 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
/* SDIO presence is virtual (set via dw_mci_set_sdio_status). */
1658 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1659 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1661 /* Use platform get_cd function, else try onboard card detect */
1662 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1664 else if (brd->get_cd)
1665 present = !brd->get_cd(slot->id);
1666 else if (!IS_ERR_VALUE(gpio_cd))
1669 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
/* Publish the result in slot->flags under the host lock. */
1672 spin_lock_bh(&host->lock);
1674 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1675 dev_dbg(&mmc->class_dev, "card is present\n");
1677 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1678 dev_dbg(&mmc->class_dev, "card is not present\n");
1680 spin_unlock_bh(&host->lock);
1687 * The DTS should declare the eMMC controller with the "poll-hw-reset" capability.
/*
 * mmc_host_ops .hw_reset for eMMC: (1) issue CMD12 to abort any transfer
 * in flight, (2) wait for/ack the data-over interrupt, (3) reset the
 * IDMAC, DMA and FIFO in that exact order, then (4) pulse PWREN/RST_n
 * with timings derived from the eMMC spec (tRstW/tRSCA/tRSTH).
 * NOTE: several original source lines are elided in this view.
 */
1689 static void dw_mci_hw_reset(struct mmc_host *mmc)
1691 struct dw_mci_slot *slot = mmc_priv(mmc);
1692 struct dw_mci *host = slot->host;
1697 unsigned long timeout;
1700 /* (1) CMD12 to end any transfer in process */
1701 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1702 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1704 if(host->mmc->hold_reg_flag)
1705 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1706 mci_writel(host, CMDARG, 0);
1708 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait up to 500 ms for the controller to accept the command. */
1710 timeout = jiffies + msecs_to_jiffies(500);
1712 ret = time_before(jiffies, timeout);
1713 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1718 MMC_DBG_ERR_FUNC(host->mmc,
1719 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1720 __func__, mmc_hostname(host->mmc));
1722 /* (2) wait DTO, even if no response is sent back by card */
1724 timeout = jiffies + msecs_to_jiffies(5);
1726 ret = time_before(jiffies, timeout);
1727 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1728 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1734 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1736 /* Software reset - BMOD[0] for IDMA only */
1737 regs = mci_readl(host, BMOD);
1738 regs |= SDMMC_IDMAC_SWRESET;
1739 mci_writel(host, BMOD, regs);
1740 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1741 regs = mci_readl(host, BMOD);
1742 if(regs & SDMMC_IDMAC_SWRESET)
1743 MMC_DBG_WARN_FUNC(host->mmc,
1744 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1745 __func__, mmc_hostname(host->mmc));
1747 /* DMA reset - CTRL[2] */
1748 regs = mci_readl(host, CTRL);
1749 regs |= SDMMC_CTRL_DMA_RESET;
1750 mci_writel(host, CTRL, regs);
1751 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1752 regs = mci_readl(host, CTRL);
1753 if(regs & SDMMC_CTRL_DMA_RESET)
1754 MMC_DBG_WARN_FUNC(host->mmc,
1755 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1756 __func__, mmc_hostname(host->mmc));
1758 /* FIFO reset - CTRL[1] */
1759 regs = mci_readl(host, CTRL);
1760 regs |= SDMMC_CTRL_FIFO_RESET;
1761 mci_writel(host, CTRL, regs);
1762 mdelay(1); /* no timing limited, 1ms is random value */
1763 regs = mci_readl(host, CTRL);
1764 if(regs & SDMMC_CTRL_FIFO_RESET)
1765 MMC_DBG_WARN_FUNC(host->mmc,
/* NOTE(review): copy/paste — this checks FIFO_RESET but the message says DMA_RESET. */
1766 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1767 __func__, mmc_hostname(host->mmc));
1770 According to eMMC spec
1771 tRstW >= 1us ; RST_n pulse width
1772 tRSCA >= 200us ; RST_n to Command time
1773 tRSTH >= 1us ; RST_n high period
/* (4) Pulse power and RST_n low, then high, honoring the above timings. */
1775 mci_writel(slot->host, PWREN, 0x0);
1776 mci_writel(slot->host, RST_N, 0x0);
1778 udelay(10); /* 10us for bad quality eMMc. */
1780 mci_writel(slot->host, PWREN, 0x1);
1781 mci_writel(slot->host, RST_N, 0x1);
1783 usleep_range(500, 1000); /* at least 500(> 200us) */
1787 * Disable low power mode.
1789 * Low power mode will stop the card clock when idle. According to the
1790 * description of the CLKENA register we should disable low power mode
1791 * for SDIO cards if we need SDIO interrupts to work.
1793 * This function is fast if low power mode is already disabled.
1795 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1797 struct dw_mci *host = slot->host;
1799 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1801 clk_en_a = mci_readl(host, CLKENA);
/* Only touch the hardware (and send UPD_CLK) if the bit is actually set. */
1803 if (clk_en_a & clken_low_pwr) {
1804 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1805 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1806 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * mmc_host_ops .enable_sdio_irq: mask/unmask the per-slot SDIO interrupt
 * in INTMASK under host->slock.  The SDIO interrupt bit position moved
 * by 8 in IP versions >= 2.40a, hence the verid check.  Enabling also
 * forces low-power clock gating off so the card clock keeps running and
 * the card can actually signal the interrupt.
 */
1810 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1812 struct dw_mci_slot *slot = mmc_priv(mmc);
1813 struct dw_mci *host = slot->host;
1814 unsigned long flags;
1818 spin_lock_irqsave(&host->slock, flags);
1820 /* Enable/disable Slot Specific SDIO interrupt */
1821 int_mask = mci_readl(host, INTMASK);
1823 if (host->verid < DW_MMC_240A)
1824 sdio_int = SDMMC_INT_SDIO(slot->id);
1826 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1830 * Turn off low power mode if it was enabled. This is a bit of
1831 * a heavy operation and we disable / enable IRQs a lot, so
1832 * we'll leave low power mode disabled and it will get
1833 * re-enabled again in dw_mci_setup_bus().
1835 dw_mci_disable_low_power(slot);
1837 mci_writel(host, INTMASK,
1838 (int_mask | sdio_int));
1840 mci_writel(host, INTMASK,
1841 (int_mask & ~sdio_int));
1844 spin_unlock_irqrestore(&host->slock, flags);
1847 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* IO-domain voltages in millivolts (enum body; opening line elided). */
1849 IO_DOMAIN_12 = 1200,
1850 IO_DOMAIN_18 = 1800,
1851 IO_DOMAIN_33 = 3300,
/*
 * Program the SoC GRF so the SD IO-domain voltage select matches the
 * requested signalling voltage.  rk3288 writes RK3288_GRF_IO_VSEL via
 * grf_writel(); rk3368 writes GRF offset 0x900 through the regmap.  The
 * upper halfword of each write is the write-enable mask for the bit.
 * Unknown chips just log an error.
 * NOTE: several original source lines (voltage validation) are elided.
 */
1853 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1863 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1864 __FUNCTION__, mmc_hostname(host->mmc));
1867 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1868 __FUNCTION__, mmc_hostname(host->mmc));
1872 if (cpu_is_rk3288()) {
1873 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1874 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1877 } else if (host->cid == DW_MCI_TYPE_RK3368) {
1878 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1879 regmap_write(host->grf, 0x900, (voltage << 6) | (1 << 22));
1883 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1884 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * Perform the actual signal-voltage switch: set the vmmc regulator to
 * the target voltage (via the rk3288 io-domain helper where needed),
 * reprogram the GRF io-domain select, update SDMMC_UHS_VOLT_REG_18 in
 * UHS_REG, and verify the register state after the ~5 ms settle time
 * the spec allows the regulator.  Only supported on IP >= 2.40a.
 * NOTE: several original source lines are elided in this view.
 */
1888 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1889 struct mmc_ios *ios)
1892 unsigned int value,uhs_reg;
1895 * Signal Voltage Switching is only applicable for Host Controllers
1898 if (host->verid < DW_MMC_240A)
1901 uhs_reg = mci_readl(host, UHS_REG);
1902 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1903 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1905 switch (ios->signal_voltage) {
1906 case MMC_SIGNAL_VOLTAGE_330:
1907 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1909 if (cpu_is_rk3288())
1910 ret = io_domain_regulator_set_voltage(
1911 host->vmmc, 3300000, 3300000);
1913 ret = regulator_set_voltage(host->vmmc, 3300000, 3300000);
1915 /* regulator_put(host->vmmc); //to be done in remove function. */
1917 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1918 __func__, regulator_get_voltage(host->vmmc), ret);
1920 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1921 " failed\n", mmc_hostname(host->mmc));
1924 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1926 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1927 __FUNCTION__, mmc_hostname(host->mmc));
1929 /* set High-power mode */
1930 value = mci_readl(host, CLKENA);
1931 value &= ~SDMMC_CLKEN_LOW_PWR;
1932 mci_writel(host,CLKENA , value);
/* Clear the 1.8V select bit for 3.3V operation. */
1934 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1935 mci_writel(host,UHS_REG , uhs_reg);
1938 usleep_range(5000, 5500);
1940 /* 3.3V regulator output should be stable within 5 ms */
1941 uhs_reg = mci_readl(host, UHS_REG);
1942 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1945 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1946 mmc_hostname(host->mmc));
1949 case MMC_SIGNAL_VOLTAGE_180:
1951 if (cpu_is_rk3288())
1952 ret = io_domain_regulator_set_voltage(
1956 ret = regulator_set_voltage(
1959 /* regulator_put(host->vmmc);//to be done in remove function. */
1961 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1962 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1964 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1965 " failed\n", mmc_hostname(host->mmc));
1968 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1972 * Enable 1.8V Signal Enable in the Host Control2
1975 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1978 usleep_range(5000, 5500);
1979 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1980 __FUNCTION__,mmc_hostname(host->mmc));
1982 /* 1.8V regulator output should be stable within 5 ms */
1983 uhs_reg = mci_readl(host, UHS_REG);
1984 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1987 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1988 mmc_hostname(host->mmc));
1991 case MMC_SIGNAL_VOLTAGE_120:
1993 if (cpu_is_rk3288())
1994 ret = io_domain_regulator_set_voltage(
1998 ret = regulator_set_voltage(host->vmmc,
2001 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
2002 " failed\n", mmc_hostname(host->mmc));
2008 /* No signal voltage switch required */
/*
 * mmc_host_ops .start_signal_voltage_switch: thin wrapper that rejects
 * pre-2.40a controllers and delegates to the _do_ helper above.
 */
2014 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
2015 struct mmc_ios *ios)
2017 struct dw_mci_slot *slot = mmc_priv(mmc);
2018 struct dw_mci *host = slot->host;
2021 if (host->verid < DW_MMC_240A)
2024 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * mmc_host_ops .execute_tuning: pick the tuning block pattern matching
 * the command (CMD21 HS200 8/4-bit, or CMD19 4-bit), choose the clock
 * controller id per card type (eMMC=3, SDIO=1, SD=0), and delegate to
 * the SoC-specific execute_tuning hook.  Skipped on rk3036/rk312x,
 * which lack a 1.8V io domain.
 * NOTE: several original source lines are elided in this view.
 */
2030 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2032 struct dw_mci_slot *slot = mmc_priv(mmc);
2033 struct dw_mci *host = slot->host;
2034 const struct dw_mci_drv_data *drv_data = host->drv_data;
2035 struct dw_mci_tuning_data tuning_data;
2038 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
2039 if(cpu_is_rk3036() || cpu_is_rk312x())
2042 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
2043 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
2044 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
2045 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
2046 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
2047 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
2048 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
2052 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
2053 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
2054 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
2057 "Undefined command(%d) for tuning\n", opcode);
2062 /* Recommend sample phase and delayline
2063 Fixme: Mix-use these three controllers will cause
2066 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
2067 tuning_data.con_id = 3;
2068 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2069 tuning_data.con_id = 1;
2071 tuning_data.con_id = 0;
2073 /* 0: driver, from host->devices
2074 1: sample, from devices->host
2076 tuning_data.tuning_type = 1;
2078 if (drv_data && drv_data->execute_tuning)
2079 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/*
 * mmc_host_ops .post_tmo: recovery procedure after a request timeout.
 * Aborts the hung transfer (CMD12/stop), resets the whole controller,
 * re-initializes DMA, restores FIFOTH/TMOUT and the interrupt mask, and
 * replays set_ios/setup_bus on slots that keep power over suspend.
 * NOTE: several original source lines are elided in this view.
 */
2084 static void dw_mci_post_tmo(struct mmc_host *mmc)
2086 struct dw_mci_slot *slot = mmc_priv(mmc);
2087 struct dw_mci *host = slot->host;
2088 struct mmc_data *data;
2089 u32 ret, i, regs, cmd_flags;
2091 unsigned long timeout = 0;
2092 bool ret_timeout = true;
/* Drop the hung request and force the state machine back to idle. */
2094 host->cur_slot->mrq = NULL;
2096 host->state = STATE_IDLE;
2100 printk("[%s] -- Timeout recovery procedure start --\n",
2101 mmc_hostname(host->mmc));
2103 if (data && (data->stop)) {
2104 send_stop_cmd(host, data);
/* No stop command in the request: issue a raw CMD12 ourselves. */
2106 mci_writel(host, CMDARG, 0);
2108 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC |
2109 SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2111 if (host->mmc->hold_reg_flag)
2112 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2114 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait up to 500 ms for the controller to latch the command. */
2116 timeout = jiffies + msecs_to_jiffies(500);
2118 while(ret_timeout) {
2119 ret_timeout = time_before(jiffies, timeout);
2120 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2124 if (false == ret_timeout)
2125 MMC_DBG_ERR_FUNC(host->mmc, "stop recovery failed![%s]",
2126 mmc_hostname(host->mmc));
2129 if (!dw_mci_ctrl_all_reset(host)) {
2134 #ifdef CONFIG_MMC_DW_IDMAC
2135 if (host->use_dma && host->dma_ops->init)
2136 host->dma_ops->init(host);
2140 * Restore the initial value at FIFOTH register
2141 * And Invalidate the prev_blksz with zero
2143 mci_writel(host, FIFOTH, host->fifoth_val);
2144 host->prev_blksz = 0;
2145 mci_writel(host, TMOUT, 0xFFFFFFFF);
2146 mci_writel(host, RINTSTS, 0xFFFFFFFF);
/* Rebuild the interrupt mask; CD only for non-SDIO hosts. */
2147 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
2148 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
2149 if (!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
2150 regs |= SDMMC_INT_CD;
/* Preserve the SDIO interrupt bit if it was enabled before recovery. */
2152 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)) {
2153 if (host->verid < DW_MMC_240A)
2154 sdio_int = SDMMC_INT_SDIO(0);
2156 sdio_int = SDMMC_INT_SDIO(8);
2158 if (mci_readl(host, INTMASK) & sdio_int)
2162 mci_writel(host, INTMASK, regs);
2163 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2164 for (i = 0; i < host->num_slots; i++) {
2165 struct dw_mci_slot *slot = host->slot[i];
2168 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2169 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2170 dw_mci_setup_bus(slot, true);
2173 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2175 printk("[%s] -- Timeout recovery procedure finished --\n",
2176 mmc_hostname(host->mmc));
/* Host operations table registered with the MMC core. */
2180 static const struct mmc_host_ops dw_mci_ops = {
2181 .request = dw_mci_request,
2182 .pre_req = dw_mci_pre_req,
2183 .post_req = dw_mci_post_req,
2184 .set_ios = dw_mci_set_ios,
2185 .get_ro = dw_mci_get_ro,
2186 .get_cd = dw_mci_get_cd,
2187 .set_sdio_status = dw_mci_set_sdio_status,
2188 .hw_reset = dw_mci_hw_reset,
2189 .enable_sdio_irq = dw_mci_enable_sdio_irq,
2190 .execute_tuning = dw_mci_execute_tuning,
2191 .post_tmo = dw_mci_post_tmo,
/* Voltage-switch ops only exist when the Rockchip switch support is built in. */
2192 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
2193 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
2194 .card_busy = dw_mci_card_busy,
/*
 * Enable or disable the host's interrupt line, tracking the current
 * state in host->irq_state so repeated calls with the same flag do not
 * unbalance the enable_irq()/disable_irq() depth.
 */
2199 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
2201 unsigned long flags;
2206 local_irq_save(flags);
/* Only act on an actual state transition. */
2207 if(host->irq_state != irqflag)
2209 host->irq_state = irqflag;
2212 enable_irq(host->irq);
2216 disable_irq(host->irq);
2219 local_irq_restore(flags);
/*
 * Finalize a write transfer: for host->card (SEND) direction, translate
 * any recorded DCRC/EBE status into data->error (except for the bus-test
 * command CMD19/MMC_BUS_TEST_W, whose CRC errors are expected), then
 * wait for the card to go unbusy before the request is completed.
 */
2223 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2224 __releases(&host->lock)
2225 __acquires(&host->lock)
2227 if(DW_MCI_SEND_STATUS == host->dir_status){
2229 if( MMC_BUS_TEST_W != host->cmd->opcode){
2230 if(host->data_status & SDMMC_INT_DCRC)
2231 host->data->error = -EILSEQ;
2232 else if(host->data_status & SDMMC_INT_EBE)
2233 host->data->error = -ETIMEDOUT;
2235 dw_mci_wait_unbusy(host);
2238 dw_mci_wait_unbusy(host);
/*
 * Complete the current request and advance the host state machine:
 * drain the data path, clear cur_slot->mrq, start the next queued slot
 * (or go idle), and call mmc_request_done() with host->lock temporarily
 * dropped (hence the __releases/__acquires annotations).
 */
2243 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2244 __releases(&host->lock)
2245 __acquires(&host->lock)
2247 struct dw_mci_slot *slot;
2248 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2250 //WARN_ON(host->cmd || host->data);
2252 dw_mci_deal_data_end(host, mrq);
2255 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2256 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2258 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2259 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2261 host->cur_slot->mrq = NULL;
/* Dequeue and start the next waiting slot, if any. */
2263 if (!list_empty(&host->queue)) {
2264 slot = list_entry(host->queue.next,
2265 struct dw_mci_slot, queue_node);
2266 list_del(&slot->queue_node);
2267 dev_vdbg(host->dev, "list not empty: %s is next\n",
2268 mmc_hostname(slot->mmc));
2269 host->state = STATE_SENDING_CMD;
2270 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2271 dw_mci_start_request(host, slot);
2273 dev_vdbg(host->dev, "list empty\n");
2274 host->state = STATE_IDLE;
/* Notify the core outside the lock; mmc_request_done may re-enter. */
2277 spin_unlock(&host->lock);
2278 mmc_request_done(prev_mmc, mrq);
2279 spin_lock(&host->lock);
/*
 * Read the command response registers into cmd->resp[] (RESP0..3 for
 * 136-bit responses, RESP0 only otherwise) and translate latched status
 * bits into cmd->error: RTO -> -ETIMEDOUT, RCRC (when a CRC-checked
 * response was expected) -> -EILSEQ, RESP_ERR handled below.
 * NOTE: several original source lines are elided in this view.
 */
2282 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2284 u32 status = host->cmd_status;
2286 host->cmd_status = 0;
2288 /* Read the response from the card (up to 16 bytes) */
2289 if (cmd->flags & MMC_RSP_PRESENT) {
2290 if (cmd->flags & MMC_RSP_136) {
/* 136-bit responses arrive most-significant word in RESP3. */
2291 cmd->resp[3] = mci_readl(host, RESP0);
2292 cmd->resp[2] = mci_readl(host, RESP1);
2293 cmd->resp[1] = mci_readl(host, RESP2);
2294 cmd->resp[0] = mci_readl(host, RESP3);
2296 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2297 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2299 cmd->resp[0] = mci_readl(host, RESP0);
2303 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2304 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
2308 if (status & SDMMC_INT_RTO)
2310 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2313 cmd->error = -ETIMEDOUT;
2314 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2315 cmd->error = -EILSEQ;
2316 }else if (status & SDMMC_INT_RESP_ERR){
2321 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2322 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* Repeated response-timeout accounting (skip CMD13 polling). */
2325 if(MMC_SEND_STATUS != cmd->opcode)
2326 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2327 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2328 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2332 /* newer ip versions need a delay between retries */
2333 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine driven by the interrupt handler's pending
 * event bits.  Runs under host->lock and loops until the state stops
 * changing: SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP,
 * with DATA_ERROR as the error detour.  On errors it stops DMA, issues
 * a stop/abort command, and resets the FIFO before completing the
 * request.  NOTE: many original source lines are elided in this view;
 * the visible case labels imply an enclosing do { switch } while loop.
 */
2339 static void dw_mci_tasklet_func(unsigned long priv)
2341 struct dw_mci *host = (struct dw_mci *)priv;
2342 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2343 struct mmc_data *data;
2344 struct mmc_command *cmd;
2345 enum dw_mci_state state;
2346 enum dw_mci_state prev_state;
2347 u32 status, cmd_flags;
2348 unsigned long timeout = 0;
2351 spin_lock(&host->lock);
2353 state = host->state;
2363 case STATE_SENDING_CMD:
2364 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2365 &host->pending_events))
2370 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2371 dw_mci_command_complete(host, cmd);
/* CMD23 finished cleanly: immediately start the real data command. */
2372 if (cmd == host->mrq->sbc && !cmd->error) {
2373 prev_state = state = STATE_SENDING_CMD;
2374 __dw_mci_start_request(host, host->cur_slot,
/* Command with data failed: abort the transfer via stop/abort. */
2379 if (cmd->data && cmd->error) {
2380 dw_mci_stop_dma(host);
2383 send_stop_cmd(host, data);
2384 state = STATE_SENDING_STOP;
2387 /* host->data = NULL; */
2390 send_stop_abort(host, data);
2391 state = STATE_SENDING_STOP;
2394 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2397 if (!host->mrq->data || cmd->error) {
2398 dw_mci_request_end(host, host->mrq);
2402 prev_state = state = STATE_SENDING_DATA;
2405 case STATE_SENDING_DATA:
2406 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2407 dw_mci_stop_dma(host);
2410 send_stop_cmd(host, data);
2412 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2413 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2414 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
/* Hand-roll a CMD12 since the request carried no stop command. */
2416 mci_writel(host, CMDARG, 0);
2418 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2419 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2421 if(host->mmc->hold_reg_flag)
2422 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2424 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
2426 timeout = jiffies + msecs_to_jiffies(500);
2429 ret = time_before(jiffies, timeout);
2430 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2434 MMC_DBG_ERR_FUNC(host->mmc,
2435 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2436 __func__, mmc_hostname(host->mmc));
2439 send_stop_abort(host, data);
2441 state = STATE_DATA_ERROR;
2445 MMC_DBG_CMD_FUNC(host->mmc,
2446 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2447 prev_state,state, mmc_hostname(host->mmc));
2449 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2450 &host->pending_events))
2452 MMC_DBG_INFO_FUNC(host->mmc,
2453 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2454 prev_state,state,mmc_hostname(host->mmc));
2456 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2457 prev_state = state = STATE_DATA_BUSY;
2460 case STATE_DATA_BUSY:
2461 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2462 &host->pending_events))
2465 dw_mci_deal_data_end(host, host->mrq);
2466 MMC_DBG_INFO_FUNC(host->mmc,
2467 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2468 prev_state,state,mmc_hostname(host->mmc));
2470 /* host->data = NULL; */
2471 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2472 status = host->data_status;
/* Map latched data-error status bits onto data->error. */
2474 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2475 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2476 MMC_DBG_ERR_FUNC(host->mmc,
2477 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2478 prev_state,state, status, mmc_hostname(host->mmc));
2480 if (status & SDMMC_INT_DRTO) {
2481 data->error = -ETIMEDOUT;
2482 } else if (status & SDMMC_INT_DCRC) {
2483 data->error = -EILSEQ;
2484 } else if (status & SDMMC_INT_EBE &&
2485 host->dir_status == DW_MCI_SEND_STATUS){
2487 * No data CRC status was returned.
2488 * The number of bytes transferred will
2489 * be exaggerated in PIO mode.
2491 data->bytes_xfered = 0;
2492 data->error = -ETIMEDOUT;
2501 * After an error, there may be data lingering
2502 * in the FIFO, so reset it - doing so
2503 * generates a block interrupt, hence setting
2504 * the scatter-gather pointer to NULL.
2506 dw_mci_fifo_reset(host);
2508 data->bytes_xfered = data->blocks * data->blksz;
2513 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2514 prev_state,state,mmc_hostname(host->mmc));
2515 dw_mci_request_end(host, host->mrq);
2518 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2519 prev_state,state,mmc_hostname(host->mmc));
/* With CMD23 (sbc) the card stops itself; no CMD12 needed on success. */
2521 if (host->mrq->sbc && !data->error) {
2522 data->stop->error = 0;
2524 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2525 prev_state,state,mmc_hostname(host->mmc));
2527 dw_mci_request_end(host, host->mrq);
2531 prev_state = state = STATE_SENDING_STOP;
2533 send_stop_cmd(host, data);
2535 if (data->stop && !data->error) {
2536 /* stop command for open-ended transfer*/
2538 send_stop_abort(host, data);
2542 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2543 prev_state,state,mmc_hostname(host->mmc));
2545 case STATE_SENDING_STOP:
2546 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2549 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2550 prev_state, state, mmc_hostname(host->mmc));
2552 /* CMD error in data command */
2553 if (host->mrq->cmd->error && host->mrq->data) {
2554 dw_mci_fifo_reset(host);
2558 host->data = NULL; */
2560 dw_mci_command_complete(host, host->mrq->stop);
2562 if (host->mrq->stop)
2563 dw_mci_command_complete(host, host->mrq->stop);
2565 host->cmd_status = 0;
2568 dw_mci_request_end(host, host->mrq);
2571 case STATE_DATA_ERROR:
2572 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2573 &host->pending_events))
2576 state = STATE_DATA_BUSY;
2579 } while (state != prev_state);
2581 host->state = state;
2583 spin_unlock(&host->lock);
2587 /* push final bytes to part_buf, only use during push */
/*
 * Stash the trailing bytes (fewer than one full FIFO word) of a PIO push
 * transfer in host->part_buf so a later call can emit a complete word.
 * NOTE(review): the listing elides brace-only lines; body shown verbatim.
 */
2588 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2590 memcpy((void *)&host->part_buf, buf, cnt);
2591 host->part_buf_count = cnt;
2594 /* append bytes to part_buf, only use during push */
/*
 * Append up to one FIFO word's worth of bytes to host->part_buf.
 * The cap (1 << data_shift) is the FIFO word size in bytes.
 * Returns the number of bytes actually consumed (the clamped cnt).
 */
2595 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2597 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2598 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2599 host->part_buf_count += cnt;
2603 /* pull first bytes from part_buf, only use during pull */
/*
 * Drain previously-buffered bytes from host->part_buf into buf before
 * touching the FIFO. Returns the number of bytes copied (<= cnt).
 * part_buf_start/part_buf_count track the unread window inside part_buf.
 */
2604 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2606 cnt = min(cnt, (int)host->part_buf_count);
2608 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2610 host->part_buf_count -= cnt;
2611 host->part_buf_start += cnt;
2616 /* pull final bytes from the part_buf, assuming it's just been filled */
/*
 * Copy the first cnt bytes of a freshly-filled part_buf to the caller and
 * mark the remainder of the FIFO word ((1 << data_shift) - cnt bytes) as
 * still pending for the next pull.
 */
2617 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2619 memcpy(buf, &host->part_buf, cnt);
2620 host->part_buf_start = cnt;
2621 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO push for a 16-bit wide FIFO: flush any partial word left in
 * part_buf, handle a misaligned source via a bounce buffer when the
 * platform lacks efficient unaligned access, then stream whole 16-bit
 * words; leftover bytes go back into part_buf. The final part-word is
 * flushed only when the whole transfer length has been reached.
 * NOTE(review): listing is sampled — some statements between the shown
 * line numbers are elided.
 */
2624 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2626 struct mmc_data *data = host->data;
2629 /* try and push anything in the part_buf */
2630 if (unlikely(host->part_buf_count)) {
2631 int len = dw_mci_push_part_bytes(host, buf, cnt);
2634 if (host->part_buf_count == 2) {
2635 mci_writew(host, DATA(host->data_offset),
2637 host->part_buf_count = 0;
2640 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2641 if (unlikely((unsigned long)buf & 0x1)) {
2643 u16 aligned_buf[64];
2644 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2645 int items = len >> 1;
2647 /* memcpy from input buffer into aligned buffer */
2648 memcpy(aligned_buf, buf, len);
2651 /* push data from aligned buffer into fifo */
2652 for (i = 0; i < items; ++i)
2653 mci_writew(host, DATA(host->data_offset),
2660 for (; cnt >= 2; cnt -= 2)
2661 mci_writew(host, DATA(host->data_offset), *pdata++)
2664 /* put anything remaining in the part_buf */
2666 dw_mci_set_part_bytes(host, buf, cnt);
2667 /* Push data if we have reached the expected data length */
2668 if ((data->bytes_xfered + init_cnt) ==
2669 (data->blksz * data->blocks))
2670 mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit wide FIFO: bounce through an aligned buffer for
 * misaligned destinations (when unaligned access is not efficient),
 * stream whole 16-bit words, then read one final word into part_buf16
 * and hand the odd leftover byte(s) out via dw_mci_pull_final_bytes().
 */
2675 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2677 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2678 if (unlikely((unsigned long)buf & 0x1)) {
2680 /* pull data from fifo into aligned buffer */
2681 u16 aligned_buf[64];
2682 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2683 int items = len >> 1;
2685 for (i = 0; i < items; ++i)
2686 aligned_buf[i] = mci_readw(host,
2687 DATA(host->data_offset));
2688 /* memcpy from aligned buffer into output buffer */
2689 memcpy(buf, aligned_buf, len);
2697 for (; cnt >= 2; cnt -= 2)
2698 *pdata++ = mci_readw(host, DATA(host->data_offset));
2702 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2703 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit wide FIFO. Mirrors dw_mci_push_data16() with a
 * 4-byte word size: flush part_buf, bounce misaligned sources, stream
 * 32-bit words, buffer the tail, and flush the final partial word once
 * the transfer reaches blksz * blocks bytes.
 */
2707 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2709 struct mmc_data *data = host->data;
2712 /* try and push anything in the part_buf */
2713 if (unlikely(host->part_buf_count)) {
2714 int len = dw_mci_push_part_bytes(host, buf, cnt);
2717 if (host->part_buf_count == 4) {
2718 mci_writel(host, DATA(host->data_offset),
2720 host->part_buf_count = 0;
2723 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2724 if (unlikely((unsigned long)buf & 0x3)) {
2726 u32 aligned_buf[32];
2727 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2728 int items = len >> 2;
2730 /* memcpy from input buffer into aligned buffer */
2731 memcpy(aligned_buf, buf, len);
2734 /* push data from aligned buffer into fifo */
2735 for (i = 0; i < items; ++i)
2736 mci_writel(host, DATA(host->data_offset),
2743 for (; cnt >= 4; cnt -= 4)
2744 mci_writel(host, DATA(host->data_offset), *pdata++);
2747 /* put anything remaining in the part_buf */
2749 dw_mci_set_part_bytes(host, buf, cnt);
2750 /* Push data if we have reached the expected data length */
2751 if ((data->bytes_xfered + init_cnt) ==
2752 (data->blksz * data->blocks))
2753 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit wide FIFO. Mirrors dw_mci_pull_data16() with a
 * 4-byte word size; the trailing sub-word read lands in part_buf32 and
 * is drained via dw_mci_pull_final_bytes().
 */
2758 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2760 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2761 if (unlikely((unsigned long)buf & 0x3)) {
2763 /* pull data from fifo into aligned buffer */
2764 u32 aligned_buf[32];
2765 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2766 int items = len >> 2;
2768 for (i = 0; i < items; ++i)
2769 aligned_buf[i] = mci_readl(host,
2770 DATA(host->data_offset));
2771 /* memcpy from aligned buffer into output buffer */
2772 memcpy(buf, aligned_buf, len);
2780 for (; cnt >= 4; cnt -= 4)
2781 *pdata++ = mci_readl(host, DATA(host->data_offset));
2785 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2786 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 64-bit wide FIFO. Mirrors dw_mci_push_data16()/32()
 * with an 8-byte word size.
 */
2790 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2792 struct mmc_data *data = host->data;
2795 /* try and push anything in the part_buf */
2796 if (unlikely(host->part_buf_count)) {
2797 int len = dw_mci_push_part_bytes(host, buf, cnt);
2801 if (host->part_buf_count == 8) {
2802 mci_writeq(host, DATA(host->data_offset),
2804 host->part_buf_count = 0;
2807 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2808 if (unlikely((unsigned long)buf & 0x7)) {
2810 u64 aligned_buf[16];
2811 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2812 int items = len >> 3;
2814 /* memcpy from input buffer into aligned buffer */
2815 memcpy(aligned_buf, buf, len);
2818 /* push data from aligned buffer into fifo */
2819 for (i = 0; i < items; ++i)
2820 mci_writeq(host, DATA(host->data_offset),
2827 for (; cnt >= 8; cnt -= 8)
2828 mci_writeq(host, DATA(host->data_offset), *pdata++);
2831 /* put anything remaining in the part_buf */
2833 dw_mci_set_part_bytes(host, buf, cnt);
2834 /* Push data if we have reached the expected data length */
2835 if ((data->bytes_xfered + init_cnt) ==
2836 (data->blksz * data->blocks))
2837 mci_writeq(host, DATA(host->data_offset),
/*
 * PIO pull for a 64-bit wide FIFO. Mirrors dw_mci_pull_data16()/32()
 * with an 8-byte word size; the trailing sub-word read lands directly
 * in host->part_buf.
 */
2842 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2844 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2845 if (unlikely((unsigned long)buf & 0x7)) {
2847 /* pull data from fifo into aligned buffer */
2848 u64 aligned_buf[16];
2849 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2850 int items = len >> 3;
2852 for (i = 0; i < items; ++i)
2853 aligned_buf[i] = mci_readq(host,
2854 DATA(host->data_offset));
2855 /* memcpy from aligned buffer into output buffer */
2856 memcpy(buf, aligned_buf, len);
2864 for (; cnt >= 8; cnt -= 8)
2865 *pdata++ = mci_readq(host, DATA(host->data_offset));
2869 host->part_buf = mci_readq(host, DATA(host->data_offset));
2870 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Front-end for PIO reads: satisfy the request from part_buf first, then
 * delegate the remainder to the width-specific host->pull_data hook
 * (dw_mci_pull_data16/32/64).
 */
2874 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2878 /* get remaining partial bytes */
2879 len = dw_mci_pull_part_bytes(host, buf, cnt);
2880 if (unlikely(len == cnt))
2885 /* get the rest of the data */
2886 host->pull_data(host, buf, cnt);
/*
 * Drain the receive FIFO in PIO mode, walking the request's scatterlist
 * with an sg_mapping_iter. Loops while the RXDR interrupt stays asserted
 * (and, on data-over, while the FIFO still holds words). On sg
 * exhaustion it stops the miter and flags EVENT_XFER_COMPLETE.
 * @dto: true when called from the DATA_OVER interrupt path, forcing a
 *       final drain of whatever remains in the FIFO.
 * NOTE(review): listing is sampled — error/exit paths between the shown
 * line numbers are elided.
 */
2889 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2891 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2893 unsigned int offset;
2894 struct mmc_data *data = host->data;
2895 int shift = host->data_shift;
2898 unsigned int remain, fcnt;
2900 if(!host->mmc->bus_refs){
2901 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2905 if (!sg_miter_next(sg_miter))
2908 host->sg = sg_miter->piter.sg;
2909 buf = sg_miter->addr;
2910 remain = sg_miter->length;
2914 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2915 << shift) + host->part_buf_count;
2916 len = min(remain, fcnt);
2919 dw_mci_pull_data(host, (void *)(buf + offset), len);
2920 data->bytes_xfered += len;
2925 sg_miter->consumed = offset;
2926 status = mci_readl(host, MINTSTS);
2927 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2928 /* if the RXDR is ready read again */
2929 } while ((status & SDMMC_INT_RXDR) ||
2930 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2933 if (!sg_miter_next(sg_miter))
2935 sg_miter->consumed = 0;
2937 sg_miter_stop(sg_miter);
2941 sg_miter_stop(sg_miter);
2945 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Fill the transmit FIFO in PIO mode, walking the request's scatterlist.
 * Free FIFO space is (fifo_depth - FCNT) words, minus bytes already
 * parked in part_buf. Loops while the TXDR interrupt stays asserted; on
 * sg exhaustion it stops the miter and flags EVENT_XFER_COMPLETE.
 * NOTE(review): listing is sampled — error/exit paths between the shown
 * line numbers are elided.
 */
2948 static void dw_mci_write_data_pio(struct dw_mci *host)
2950 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2952 unsigned int offset;
2953 struct mmc_data *data = host->data;
2954 int shift = host->data_shift;
2957 unsigned int fifo_depth = host->fifo_depth;
2958 unsigned int remain, fcnt;
2960 if(!host->mmc->bus_refs){
2961 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2966 if (!sg_miter_next(sg_miter))
2969 host->sg = sg_miter->piter.sg;
2970 buf = sg_miter->addr;
2971 remain = sg_miter->length;
2975 fcnt = ((fifo_depth -
2976 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2977 << shift) - host->part_buf_count;
2978 len = min(remain, fcnt);
2981 host->push_data(host, (void *)(buf + offset), len);
2982 data->bytes_xfered += len;
2987 sg_miter->consumed = offset;
2988 status = mci_readl(host, MINTSTS);
2989 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2990 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2993 if (!sg_miter_next(sg_miter))
2995 sg_miter->consumed = 0;
2997 sg_miter_stop(sg_miter);
3001 sg_miter_stop(sg_miter);
3005 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Record the raw interrupt status of a completed command (keeping the
 * first status seen if one is already latched), mark the command phase
 * complete and kick the state-machine tasklet.
 */
3008 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
3010 if (!host->cmd_status)
3011 host->cmd_status = status;
3018 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3019 tasklet_schedule(&host->tasklet);
/*
 * Top-level interrupt handler. Reads the masked interrupt status
 * (MINTSTS), acks each handled source in RINTSTS, and dispatches:
 * command/data errors, data-over, PIO RXDR/TXDR, voltage switch (VSI),
 * command-done, card detect, hardware-locked error, per-slot SDIO
 * interrupts, and (for internal-DMA SoCs) IDMAC completion via IDSTS.
 * NOTE(review): listing is sampled — some statements and the return are
 * elided between the shown line numbers.
 */
3022 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
3024 struct dw_mci *host = dev_id;
3025 u32 pending, sdio_int;
3028 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3031 * DTO fix - version 2.10a and below, and only if internal DMA
3034 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
3036 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
3037 pending |= SDMMC_INT_DATA_OVER;
3041 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
3042 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
3043 host->cmd_status = pending;
3045 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
3046 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
3048 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3051 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
3052 /* if there is an error report DATA_ERROR */
3053 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
3054 host->data_status = pending;
3056 set_bit(EVENT_DATA_ERROR, &host->pending_events);
3058 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
3059 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
3060 tasklet_schedule(&host->tasklet);
3063 if (pending & SDMMC_INT_DATA_OVER) {
3064 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
3065 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
3066 if (!host->data_status)
3067 host->data_status = pending;
3069 if (host->dir_status == DW_MCI_RECV_STATUS) {
3070 if (host->sg != NULL)
3071 dw_mci_read_data_pio(host, true);
3073 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3074 tasklet_schedule(&host->tasklet);
3077 if (pending & SDMMC_INT_RXDR) {
3078 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
3079 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
3080 dw_mci_read_data_pio(host, false);
3083 if (pending & SDMMC_INT_TXDR) {
3084 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
3085 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
3086 dw_mci_write_data_pio(host);
3089 if (pending & SDMMC_INT_VSI) {
3090 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
3091 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
3092 dw_mci_cmd_interrupt(host, pending);
3095 if (pending & SDMMC_INT_CMD_DONE) {
3096 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
3097 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
3098 dw_mci_cmd_interrupt(host, pending);
3101 if (pending & SDMMC_INT_CD) {
3102 mci_writel(host, RINTSTS, SDMMC_INT_CD);
3103 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
3104 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
3105 queue_work(host->card_workqueue, &host->card_work);
3108 if (pending & SDMMC_INT_HLE) {
3109 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
3110 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
3114 /* Handle SDIO Interrupts */
3115 for (i = 0; i < host->num_slots; i++) {
3116 struct dw_mci_slot *slot = host->slot[i];
/* IP revisions before 2.40a use the low SDIO interrupt bits; 2.40a+
 * moved them up by 8. */
3118 if (host->verid < DW_MMC_240A)
3119 sdio_int = SDMMC_INT_SDIO(i);
3121 sdio_int = SDMMC_INT_SDIO(i + 8);
3123 if (pending & sdio_int) {
3124 mci_writel(host, RINTSTS, sdio_int);
3125 mmc_signal_sdio_irq(slot->mmc);
3131 #ifdef CONFIG_MMC_DW_IDMAC
3132 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
3133 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
3134 /* Handle DMA interrupts */
3135 pending = mci_readl(host, IDSTS);
3136 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
3137 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
3138 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
3139 host->dma_ops->complete((void *)host);
/*
 * Card-detect workqueue handler. For each slot: re-read card presence,
 * switch pinctrl between "default" (card in) and "udbg" (card out, SD
 * data lines reused as uart debug), and on a presence change reset the
 * controller, fail any in-flight request with -ENOMEDIUM, and finally
 * tell the MMC core to rescan via mmc_detect_change().
 * NOTE(review): listing is sampled — several statements between the
 * shown line numbers are elided.
 */
3147 static void dw_mci_work_routine_card(struct work_struct *work)
3149 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
3152 for (i = 0; i < host->num_slots; i++) {
3153 struct dw_mci_slot *slot = host->slot[i];
3154 struct mmc_host *mmc = slot->mmc;
3155 struct mmc_request *mrq;
3158 present = dw_mci_get_cd(mmc);
3160 /* On card insert, switch the data lines to the uart function, and vice versa.
3161 Only some chips need this switched by software, using the udbg tag in dts!
3163 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
3165 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3166 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3167 mmc_hostname(host->mmc));
3169 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3170 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3171 mmc_hostname(host->mmc));
3175 while (present != slot->last_detect_state) {
3176 dev_dbg(&slot->mmc->class_dev, "card %s\n",
3177 present ? "inserted" : "removed");
3178 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
3179 present ? "inserted" : "removed.", mmc_hostname(mmc));
3181 dw_mci_ctrl_all_reset(host);
3182 /* Stop edma when routine card triggered */
3183 if(cpu_is_rk3036() || cpu_is_rk312x())
3184 if(host->dma_ops && host->dma_ops->stop)
3185 host->dma_ops->stop(host);
3186 rk_send_wakeup_key();//wake up system
3187 spin_lock_bh(&host->lock);
3189 /* Card change detected */
3190 slot->last_detect_state = present;
3192 /* Clean up queue if present */
3195 if (mrq == host->mrq) {
/* Request is currently being processed: fail whichever phase the
 * state machine is in with -ENOMEDIUM. */
3199 switch (host->state) {
3202 case STATE_SENDING_CMD:
3203 mrq->cmd->error = -ENOMEDIUM;
3207 case STATE_SENDING_DATA:
3208 mrq->data->error = -ENOMEDIUM;
3209 dw_mci_stop_dma(host);
3211 case STATE_DATA_BUSY:
3212 case STATE_DATA_ERROR:
3213 if (mrq->data->error == -EINPROGRESS)
3214 mrq->data->error = -ENOMEDIUM;
3218 case STATE_SENDING_STOP:
3219 mrq->stop->error = -ENOMEDIUM;
3223 dw_mci_request_end(host, mrq);
/* Request still queued: unlink and complete it directly. */
3225 list_del(&slot->queue_node);
3226 mrq->cmd->error = -ENOMEDIUM;
3228 mrq->data->error = -ENOMEDIUM;
3230 mrq->stop->error = -ENOMEDIUM;
3232 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3233 mrq->cmd->opcode, mmc_hostname(mmc));
3235 spin_unlock(&host->lock);
3236 mmc_request_done(slot->mmc, mrq);
3237 spin_lock(&host->lock);
3241 /* Power down slot */
3243 /* Clear down the FIFO */
3244 dw_mci_fifo_reset(host);
3245 #ifdef CONFIG_MMC_DW_IDMAC
3246 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3247 dw_mci_idmac_reset(host);
3252 spin_unlock_bh(&host->lock);
3254 present = dw_mci_get_cd(mmc);
3257 mmc_detect_change(slot->mmc,
3258 msecs_to_jiffies(host->pdata->detect_delay_ms));
3263 /* given a slot id, find out the device node representing that slot */
/*
 * Walk the controller's DT children and return the child whose "reg"
 * property equals the slot id; returns NULL-equivalent on no match
 * (exact return lines elided in this sampled listing).
 */
3264 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3266 struct device_node *np;
3270 if (!dev || !dev->of_node)
3273 for_each_child_of_node(dev->of_node, np) {
3274 addr = of_get_property(np, "reg", &len);
3275 if (!addr || (len < sizeof(int)))
3277 if (be32_to_cpup(addr) == slot)
/* Table mapping DT quirk property names to DW_MCI_SLOT_QUIRK_* flags. */
3283 static struct dw_mci_of_slot_quirks {
3286 } of_slot_quirks[] = {
3288 .quirk = "disable-wp",
3289 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * OR together the quirk flags whose DT properties are present on the
 * slot's device node (see of_slot_quirks[] above).
 */
3293 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3295 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3300 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3301 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3302 quirks |= of_slot_quirks[idx].id;
3307 /* find out bus-width for a given slot */
/* Reads the "bus-width" DT property from the controller node; logs an
 * error (and presumably falls back to a default — fallback line elided)
 * when the property is missing. */
3308 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3310 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3316 if (of_property_read_u32(np, "bus-width", &bus_wd))
3317 dev_err(dev, "bus-width property not found, assuming width"
3323 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Look up "pwr-gpios" in DT, request it, and drive it low (power-enable
 * asserted). A missing property is not an error.
 */
3324 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3326 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3332 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3334 /* Having a missing entry is valid; return silently */
3335 if (!gpio_is_valid(gpio))
3338 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3339 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3343 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3349 /* find the write protect gpio for a given slot; or -1 if none specified */
/* Look up and request the "wp-gpios" write-protect GPIO from the slot's
 * DT node; a missing property is not an error. */
3350 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3352 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3358 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3360 /* Having a missing entry is valid; return silently */
3361 if (!gpio_is_valid(gpio))
3364 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3365 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3372 /* find the cd gpio for a given slot */
/* Look up "cd-gpios" in DT and register it with the MMC core's slot-gpio
 * card-detect helper; a missing property is not an error. */
3373 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3374 struct mmc_host *mmc)
3376 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3382 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3384 /* Having a missing entry is valid; return silently */
3385 if (!gpio_is_valid(gpio))
3388 if (mmc_gpio_request_cd(mmc, gpio, 0))
3389 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * Threaded IRQ handler for a GPIO-based card-detect pin. Re-arms the
 * trigger polarity to catch the opposite edge, wakes the system, and
 * queues the card-detect work unless rescan is disabled (suspend path,
 * handled later by the PM notifier).
 */
3392 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3394 struct mmc_host *mmc = dev_id;
3395 struct dw_mci_slot *slot = mmc_priv(mmc);
3396 struct dw_mci *host = slot->host;
3397 int gpio_cd = slot->cd_gpio;
/* Flip the level trigger so the next state change fires again. */
3399 (gpio_get_value(gpio_cd) == 0) ?
3400 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
3401 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
3403 /* wakeup system whether gpio debounce or not */
3404 rk_send_wakeup_key();
3406 /* no need to trigger detect flow when rescan is disabled.
3407 This case happened in dpm: we just wake up the system and
3408 let the suspend_post notify callback handle it.
3410 if(mmc->rescan_disable == 0)
3411 queue_work(host->card_workqueue, &host->card_work);
3413 printk("%s: rescan been disabled!\n", __FUNCTION__);
/*
 * Convert the card-detect GPIO to an IRQ, register the threaded handler
 * above, and mark it as a wakeup source so card insertion can wake the
 * system from idle/deep suspend.
 */
3418 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3419 struct mmc_host *mmc)
3421 struct dw_mci_slot *slot = mmc_priv(mmc);
3422 struct dw_mci *host = slot->host;
3426 /* Having a missing entry is valid; return silently */
3427 if (!gpio_is_valid(gpio))
3430 irq = gpio_to_irq(gpio);
3432 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3433 NULL, dw_mci_gpio_cd_irqt,
3434 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
3438 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3440 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3441 enable_irq_wake(irq);
3444 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/* Tear down the card-detect GPIO IRQ registered by
 * dw_mci_of_set_cd_gpio_irq() and release the GPIO itself. */
3448 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3449 struct mmc_host *mmc)
3451 if (!gpio_is_valid(gpio))
3454 if (gpio_to_irq(gpio) >= 0) {
3455 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3456 devm_gpio_free(&mmc->class_dev, gpio);
3459 #else /* CONFIG_OF */
/* No-op stubs used when the kernel is built without devicetree support;
 * return values are elided in this sampled listing. */
3460 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3464 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3468 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3472 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3476 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3477 struct mmc_host *mmc)
3481 #endif /* CONFIG_OF */
3483 /* @host: dw_mci host prvdata
3484 * Init pinctrl for each platform. Usually we assign
3485 * "default" tag for functional usage, "idle" tag for gpio
3486 * state and "udbg" tag for uart_dbg if any.
/*
 * Lookup order: select "idle" first, then "default"; for SD slots also
 * look up "udbg" and select it when no card is present (SD data0/1 may
 * be shared with uart debug). eMMC is deliberately left untouched.
 * Lookup failures are logged but non-fatal.
 */
3488 static void dw_mci_init_pinctrl(struct dw_mci *host)
3490 /* Fixme: DON'T TOUCH EMMC SETTING! */
3491 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3494 /* Get pinctrl for DTS */
3495 host->pinctrl = devm_pinctrl_get(host->dev);
3496 if (IS_ERR(host->pinctrl)) {
3497 dev_err(host->dev, "%s: No pinctrl used!\n",
3498 mmc_hostname(host->mmc));
3502 /* Lookup idle state */
3503 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3504 PINCTRL_STATE_IDLE);
3505 if (IS_ERR(host->pins_idle)) {
3506 dev_err(host->dev, "%s: No idle tag found!\n",
3507 mmc_hostname(host->mmc));
3509 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3510 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3511 mmc_hostname(host->mmc));
3514 /* Lookup default state */
3515 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3516 PINCTRL_STATE_DEFAULT);
3517 if (IS_ERR(host->pins_default)) {
3518 dev_err(host->dev, "%s: No default pinctrl found!\n",
3519 mmc_hostname(host->mmc));
3521 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3522 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3523 mmc_hostname(host->mmc));
3526 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3527 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3528 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3529 if (IS_ERR(host->pins_udbg)) {
3530 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3531 mmc_hostname(host->mmc));
3533 if (!dw_mci_get_cd(host->mmc))
3534 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3535 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3536 mmc_hostname(host->mmc));
/*
 * System PM notifier for SD slots: on suspend-prepare, disable card
 * rescan and cancel a pending detect work (dropping its wake lock); on
 * resume, re-enable rescan and trigger a detect so a card swapped while
 * suspended is noticed. host->lock guards rescan_disable.
 */
3541 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3542 unsigned long mode, void *unused)
3544 struct mmc_host *host = container_of(
3545 notify_block, struct mmc_host, pm_notify);
3546 unsigned long flags;
3549 case PM_HIBERNATION_PREPARE:
3550 case PM_SUSPEND_PREPARE:
3551 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3552 spin_lock_irqsave(&host->lock, flags);
3553 host->rescan_disable = 1;
3554 spin_unlock_irqrestore(&host->lock, flags);
3555 if (cancel_delayed_work(&host->detect))
3556 wake_unlock(&host->detect_wake_lock);
3559 case PM_POST_SUSPEND:
3560 case PM_POST_HIBERNATION:
3561 case PM_POST_RESTORE:
3562 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3563 spin_lock_irqsave(&host->lock, flags);
3564 host->rescan_disable = 0;
3565 spin_unlock_irqrestore(&host->lock, flags);
3566 mmc_detect_change(host, 10);
/*
 * Allocate and register the mmc_host for slot @id: parse DT properties
 * (frequency range, supported card types, bus width, caps), register a
 * PM notifier for SD, apply SoC-specific GRF setup (disable force_jtag),
 * wire up GPIO card detect on low-end SoCs, set OCR mask and block
 * limits, request regulators/GPIOs, init pinctrl, and mmc_add_host().
 * Returns 0 on success; error paths unwind the PM notifier and cd-gpio
 * IRQ. NOTE(review): listing is heavily sampled — declarations, labels
 * and several statements between the shown line numbers are elided.
 */
3572 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3574 struct mmc_host *mmc;
3575 struct dw_mci_slot *slot;
3576 const struct dw_mci_drv_data *drv_data = host->drv_data;
3581 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3585 slot = mmc_priv(mmc);
3589 host->slot[id] = slot;
3592 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3594 mmc->ops = &dw_mci_ops;
/* Clock range: DT "clock-freq-min-max" overrides the driver defaults. */
3596 if (of_property_read_u32_array(host->dev->of_node,
3597 "clock-freq-min-max", freq, 2)) {
3598 mmc->f_min = DW_MCI_FREQ_MIN;
3599 mmc->f_max = DW_MCI_FREQ_MAX;
3601 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3602 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3604 mmc->f_min = freq[0];
3605 mmc->f_max = freq[1];
3607 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3608 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3611 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
/* Rockchip DT extension: each controller instance declares which card
 * type it serves. */
3613 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3614 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3615 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3616 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3617 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3618 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3620 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3621 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3622 if (register_pm_notifier(&mmc->pm_notify)) {
3623 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3624 goto err_pm_notifier;
3628 if (host->cid == DW_MCI_TYPE_RK3368) {
3629 if (IS_ERR(host->grf))
3630 pr_err("rk_sdmmc: dts couldn't find grf regmap for 3368\n");
3632 /* Disable force_jtag */
3633 regmap_write(host->grf, 0x43c, (1<<13)<<16 | (0 << 13));
3634 } else if (cpu_is_rk3288()) {
3635 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
3639 /* We assume only low-level chip use gpio_cd */
3640 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
3641 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3642 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3643 if (gpio_is_valid(slot->cd_gpio)) {
3644 /* Request gpio int for card detection */
3645 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3647 slot->cd_gpio = -ENODEV;
3648 dev_err(host->dev, "failed to get your cd-gpios!\n");
3652 if (host->pdata->get_ocr)
3653 mmc->ocr_avail = host->pdata->get_ocr(id);
/* Default OCR mask: advertise the full 1.65-3.6V range. */
3656 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3657 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3658 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3659 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3663 * Start with slot power disabled, it will be enabled when a card
3666 if (host->pdata->setpower)
3667 host->pdata->setpower(id, 0);
3669 if (host->pdata->caps)
3670 mmc->caps = host->pdata->caps;
3672 if (host->pdata->pm_caps)
3673 mmc->pm_caps = host->pdata->pm_caps;
3675 if (host->dev->of_node) {
3676 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3680 ctrl_id = to_platform_device(host->dev)->id;
3682 if (drv_data && drv_data->caps)
3683 mmc->caps |= drv_data->caps[ctrl_id];
3684 if (drv_data && drv_data->hold_reg_flag)
3685 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3687 /* set the compatibility of driver. */
3688 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3689 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3691 if (host->pdata->caps2)
3692 mmc->caps2 = host->pdata->caps2;
3694 if (host->pdata->get_bus_wd)
3695 bus_width = host->pdata->get_bus_wd(slot->id);
3696 else if (host->dev->of_node)
3697 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3701 switch (bus_width) {
3703 mmc->caps |= MMC_CAP_8_BIT_DATA;
3705 mmc->caps |= MMC_CAP_4_BIT_DATA;
/* Map standard MMC DT properties onto caps/pm_caps flags. */
3708 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3709 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3710 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3711 mmc->caps |= MMC_CAP_SDIO_IRQ;
3712 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3713 mmc->caps |= MMC_CAP_HW_RESET;
3714 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3715 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3716 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3717 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3718 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3719 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3720 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3721 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3723 /*Assign pm_caps pass to pm_flags*/
3724 mmc->pm_flags = mmc->pm_caps;
3726 if (host->pdata->blk_settings) {
3727 mmc->max_segs = host->pdata->blk_settings->max_segs;
3728 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3729 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3730 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3731 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3733 /* Useful defaults if platform data is unset. */
3734 #ifdef CONFIG_MMC_DW_IDMAC
3735 mmc->max_segs = host->ring_size;
3736 mmc->max_blk_size = 65536;
3737 mmc->max_blk_count = host->ring_size;
3738 mmc->max_seg_size = 0x1000;
3739 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3740 if(cpu_is_rk3036() || cpu_is_rk312x()){
3741 /* fixup for external dmac setting */
3743 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3744 mmc->max_blk_count = 65535;
3745 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3746 mmc->max_seg_size = mmc->max_req_size;
/* PIO fallback limits (no DMA configured). */
3750 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3751 mmc->max_blk_count = 512;
3752 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3753 mmc->max_seg_size = mmc->max_req_size;
3754 #endif /* CONFIG_MMC_DW_IDMAC */
3758 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3760 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* Only the SD slot gets a vmmc supply regulator here. */
3765 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3766 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3770 if (IS_ERR(host->vmmc)) {
3771 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3774 ret = regulator_enable(host->vmmc);
3777 "failed to enable regulator: %d\n", ret);
3784 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3786 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3787 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3789 dw_mci_init_pinctrl(host);
3790 ret = mmc_add_host(mmc);
3794 #if defined(CONFIG_DEBUG_FS)
3795 dw_mci_init_debugfs(slot);
3798 /* Card initially undetected */
3799 slot->last_detect_state = 1;
/* Error unwind (labels elided in this sampled listing). */
3803 unregister_pm_notifier(&mmc->pm_notify);
3806 if (gpio_is_valid(slot->cd_gpio))
3807 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * Undo dw_mci_init_slot(): run the platform exit hook, unregister the
 * mmc_host from the core, clear the slot pointer and free the host.
 */
3812 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3814 /* Shutdown detect IRQ */
3815 if (slot->host->pdata->exit)
3816 slot->host->pdata->exit(id);
3818 /* Debugfs stuff is cleaned up by mmc core */
3819 mmc_remove_host(slot->mmc);
3820 slot->host->slot[id] = NULL;
3821 mmc_free_host(slot->mmc);
/*
 * Allocate the coherent descriptor page and choose the DMA backend:
 * RK3036/RK312x SoCs use an external DMA controller (dw_mci_edmac_ops),
 * everything else the IP's internal IDMAC (dw_mci_idmac_ops). Falls back
 * to PIO when the ops set is incomplete or init fails (fallback lines
 * elided in this sampled listing).
 */
3824 static void dw_mci_init_dma(struct dw_mci *host)
3826 /* Alloc memory for sg translation */
3827 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3828 &host->sg_dma, GFP_KERNEL);
3829 if (!host->sg_cpu) {
3830 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3835 memset(host->sg_cpu, 0, PAGE_SIZE);
3838 /* Determine which DMA interface to use */
3839 #if defined(CONFIG_MMC_DW_IDMAC)
3840 if(cpu_is_rk3036() || cpu_is_rk312x()){
3841 host->dma_ops = &dw_mci_edmac_ops;
3842 dev_info(host->dev, "Using external DMA controller.\n");
3844 host->dma_ops = &dw_mci_idmac_ops;
3845 dev_info(host->dev, "Using internal DMA controller.\n");
3852 if (host->dma_ops->init && host->dma_ops->start &&
3853 host->dma_ops->stop && host->dma_ops->cleanup) {
3854 if (host->dma_ops->init(host)) {
3855 dev_err(host->dev, "%s: Unable to initialize "
3856 "DMA Controller.\n", __func__);
3860 dev_err(host->dev, "DMA initialization not found.\n");
3868 dev_info(host->dev, "Using PIO mode.\n");
/*
 * dw_mci_ctrl_reset - pulse reset bit(s) in the CTRL register.
 * @host:  controller state
 * @reset: mask of SDMMC_CTRL_*_RESET bits to assert
 *
 * Writes the requested reset bits and polls CTRL until the hardware
 * self-clears them, bounded by a 500 ms jiffies timeout.  Returns true
 * on success (gapped lines hide the explicit returns -- the visible
 * timeout path logs an error).
 */
3873 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3875 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3878 ctrl = mci_readl(host, CTRL);
3880 mci_writel(host, CTRL, ctrl);
3882 /* wait till resets clear */
3884 ctrl = mci_readl(host, CTRL);
3885 if (!(ctrl & reset))
3887 } while (time_before(jiffies, timeout));
3890 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * dw_mci_fifo_reset - reset only the data FIFO.
 * @host: controller state
 *
 * Stops the sg_miter first because the FIFO reset raises an interrupt
 * and the ISR must not walk a half-finished scatter list.  Returns the
 * result of dw_mci_ctrl_reset().
 */
3896 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3899 * Reseting generates a block interrupt, hence setting
3900 * the scatter-gather pointer to NULL.
3903 sg_miter_stop(&host->sg_miter);
3907 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/*
 * dw_mci_ctrl_all_reset - full controller reset: FIFO, DMA and (per the
 * gapped line between FIFO and DMA, presumably SDMMC_CTRL_RESET -- TODO
 * confirm) the CIU, in one dw_mci_ctrl_reset() call.
 */
3910 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3912 return dw_mci_ctrl_reset(host,
3913 SDMMC_CTRL_FIFO_RESET |
3915 SDMMC_CTRL_DMA_RESET);
/*
 * dw_mci_rst_pre_suspend - snapshot controller registers before the
 * power domain is cut ("controller-power-down" DT property).
 * @host: controller state
 *
 * Copies the first DW_REGS_NUM registers (word-indexed, index*4) plus
 * CDTHRCTL into host->regs_buffer, logging each value.  The buffer is
 * allocated in dw_mci_parse_dt(); dw_mci_rst_post_resume() restores it.
 */
3918 static void dw_mci_rst_pre_suspend(struct dw_mci *host)
3923 buffer = host->regs_buffer;
3925 for (index = 0; index < DW_REGS_NUM ; index++){
3926 *buffer = mci_readreg(host, index*4);
3927 MMC_DBG_INFO_FUNC(host->mmc, "[%s] :0x%08x.\n",
3928 dw_mci_regs[index].name, *buffer);
/* CDTHRCTL lives outside the contiguous dump; saved as the last slot */
3932 *buffer = mci_readl(host,CDTHRCTL);
3933 MMC_DBG_INFO_FUNC(host->mmc, "[%s] :0x%08x.\n", "CARDTHRCTL", *buffer);
/*
 * dw_mci_rst_post_resume - restore the register snapshot taken by
 * dw_mci_rst_pre_suspend() after the power domain comes back.
 * @host: controller state
 *
 * Writes back DW_REGS_NUM registers then CDTHRCTL, in the same order
 * they were saved.
 */
3936 static void dw_mci_rst_post_resume(struct dw_mci *host)
3941 buffer = host->regs_buffer;
3943 for (index = 0; index < DW_REGS_NUM; index++){
3944 mci_writereg(host, index*4, *buffer);
3947 mci_writel(host, CDTHRCTL, *buffer);
/*
 * Reset ops installed when the DT declares "controller-power-down":
 * save registers before suspend, restore them after resume.
 */
3950 static const struct dw_mci_rst_ops dw_mci_pdrst_ops = {
3951 .pre_suspend = dw_mci_rst_pre_suspend,
3952 .post_resume = dw_mci_rst_post_resume,
/*
 * Table mapping DT quirk property names to DW_MCI_QUIRK_* flags,
 * scanned by dw_mci_parse_dt().  (Member declarations and the array
 * name are in lines not visible in this extract.)
 */
3957 static struct dw_mci_of_quirks {
3962 .quirk = "broken-cd",
3963 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * dw_mci_parse_dt - build a dw_mci_board pdata from the device tree.
 * @host: controller state (host->dev->of_node is the DT node)
 *
 * Reads slot count, quirks, FIFO depth, card-detect delay, clock
 * frequency, PM capabilities and speed-mode caps from DT properties;
 * delegates SoC-specific properties to drv_data->parse_dt().  For
 * "controller-power-down" it also allocates the register snapshot
 * buffer and installs dw_mci_pdrst_ops.  Returns the pdata, or an
 * ERR_PTR on allocation/parse failure (caller checks with IS_ERR).
 */
3967 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3969 struct dw_mci_board *pdata;
3970 struct device *dev = host->dev;
3971 struct device_node *np = dev->of_node;
3972 const struct dw_mci_drv_data *drv_data = host->drv_data;
3974 u32 clock_frequency;
3976 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3978 dev_err(dev, "could not allocate memory for pdata\n");
3979 return ERR_PTR(-ENOMEM);
3982 /* find out number of slots supported */
3983 if (of_property_read_u32(dev->of_node, "num-slots",
3984 &pdata->num_slots)) {
3985 dev_info(dev, "num-slots property not found, "
3986 "assuming 1 slot is available\n");
3987 pdata->num_slots = 1;
/* Translate DT quirk properties into DW_MCI_QUIRK_* flag bits */
3991 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3992 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3993 pdata->quirks |= of_quirks[idx].id;
3996 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3997 dev_info(dev, "fifo-depth property not found, using "
3998 "value of FIFOTH register as default\n");
4000 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
4002 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
4003 pdata->bus_hz = clock_frequency;
/* SoC-specific DT parsing hook (e.g. Rockchip tuning properties) */
4005 if (drv_data && drv_data->parse_dt) {
4006 ret = drv_data->parse_dt(host);
4008 return ERR_PTR(ret);
4011 if (of_find_property(np, "keep-power-in-suspend", NULL))
4012 pdata->pm_caps |= MMC_PM_KEEP_POWER;
4014 if (of_find_property(np, "enable-sdio-wakeup", NULL))
4015 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
4017 if (of_find_property(np, "supports-highspeed", NULL))
4018 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4020 if (of_find_property(np, "supports-UHS_SDR104", NULL))
4021 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4023 if (of_find_property(np, "supports-DDR_MODE", NULL))
4024 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
4026 if (of_find_property(np, "caps2-mmc-hs200", NULL))
4027 pdata->caps2 |= MMC_CAP2_HS200;
4029 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
4030 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
4032 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
4033 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
4035 if (of_get_property(np, "cd-inverted", NULL))
4036 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
4037 if (of_get_property(np, "bootpart-no-access", NULL))
4038 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
/*
 * Power-domain reset support: allocate the register save area and
 * install the save/restore ops used across suspend/resume.
 */
4040 if (of_get_property(np, "controller-power-down", NULL)) {
4041 host->regs_buffer = (u32 *)devm_kzalloc(host->dev,
4042 DW_REGS_SIZE, GFP_KERNEL);
4043 if (!host->regs_buffer) {
4045 "could not allocate memory for regs_buffer\n");
4046 return ERR_PTR(-ENOMEM);
4049 host->rst_ops = &dw_mci_pdrst_ops;
/* Force non-removable assumption so the core keeps the card alive */
4050 mmc_assume_removable = 0;
4056 #else /* CONFIG_OF */
/* Without OF support there is no way to build pdata from DT */
4057 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
4059 return ERR_PTR(-EINVAL);
4061 #endif /* CONFIG_OF */
/*
 * dw_mci_probe - main controller probe.
 * @host: pre-filled controller state (regs mapped, irq known)
 *
 * Order of operations: parse DT into pdata; read VERID to pick the
 * DATA register offset; acquire and enable the hpclk/hclk/clk clocks;
 * program the CIU clock rate (doubled on >= 2.40a parts to compensate
 * for a fixed /2 divider in the Rockchip clock tree); detect host data
 * width from HCON and select push/pull FIFO accessors; reset all
 * blocks; set up DMA; program FIFOTH; request the IRQ; init each slot;
 * finally unmask the interrupt sources.  Returns 0 on success or a
 * negative errno (the gapped "err_*" unwind labels release clocks, DMA
 * and the regulator in reverse order).
 */
4063 int dw_mci_probe(struct dw_mci *host)
4065 const struct dw_mci_drv_data *drv_data = host->drv_data;
4066 int width, i, ret = 0;
4072 host->pdata = dw_mci_parse_dt(host);
4073 if (IS_ERR(host->pdata)) {
4074 dev_err(host->dev, "platform data not available\n");
4079 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
4081 "Platform data must supply select_slot function\n");
4086 * In 2.40a spec, Data offset is changed.
4087 * Need to check the version-id and set data-offset for DATA register.
4089 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
4090 dev_info(host->dev, "Version ID is %04x\n", host->verid);
4092 if (host->verid < DW_MMC_240A)
4093 host->data_offset = DATA_OFFSET;
4095 host->data_offset = DATA_240A_OFFSET;
/* Clock chain: hpclk (optional?) -> hclk (bus) -> clk_mmc (CIU) */
4098 host->hpclk_mmc= devm_clk_get(host->dev, "hpclk_mmc");
4099 if (IS_ERR(host->hpclk_mmc)) {
4100 dev_err(host->dev, "failed to get hpclk_mmc\n");
4102 clk_prepare_enable(host->hpclk_mmc);
4106 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
4107 if (IS_ERR(host->hclk_mmc)) {
4108 dev_err(host->dev, "failed to get hclk_mmc\n");
4109 ret = PTR_ERR(host->hclk_mmc);
4113 clk_prepare_enable(host->hclk_mmc);
4116 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
4117 if (IS_ERR(host->clk_mmc)) {
4118 dev_err(host->dev, "failed to get clk mmc_per\n");
4119 ret = PTR_ERR(host->clk_mmc);
4123 host->bus_hz = host->pdata->bus_hz;
4124 if (!host->bus_hz) {
4125 dev_err(host->dev,"Platform data must supply bus speed\n");
4130 if (host->verid < DW_MMC_240A)
4131 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
4133 //rockchip: fix divider 2 in clksum before controlller
4134 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
4137 dev_err(host->dev, "failed to set clk mmc\n");
4140 clk_prepare_enable(host->clk_mmc);
4142 if (drv_data && drv_data->setup_clock) {
4143 ret = drv_data->setup_clock(host);
4146 "implementation specific clock setup failed\n");
4151 host->quirks = host->pdata->quirks;
4152 host->irq_state = true;
4153 host->set_speed = 0;
4155 host->svi_flags = 0;
4157 spin_lock_init(&host->lock);
4158 spin_lock_init(&host->slock);
4160 INIT_LIST_HEAD(&host->queue);
4162 * Get the host data width - this assumes that HCON has been set with
4163 * the correct values.
/* HCON[9:7]: 0 = 16-bit, 1 = 32-bit (gapped branch), 2 = 64-bit */
4165 i = (mci_readl(host, HCON) >> 7) & 0x7;
4167 host->push_data = dw_mci_push_data16;
4168 host->pull_data = dw_mci_pull_data16;
4170 host->data_shift = 1;
4171 } else if (i == 2) {
4172 host->push_data = dw_mci_push_data64;
4173 host->pull_data = dw_mci_pull_data64;
4175 host->data_shift = 3;
4177 /* Check for a reserved value, and warn if it is */
4179 "HCON reports a reserved host data width!\n"
4180 "Defaulting to 32-bit access.\n");
4181 host->push_data = dw_mci_push_data32;
4182 host->pull_data = dw_mci_pull_data32;
4184 host->data_shift = 2;
4187 /* Reset all blocks */
4188 if (!dw_mci_ctrl_all_reset(host))
4191 host->dma_ops = host->pdata->dma_ops;
4192 dw_mci_init_dma(host);
4194 /* Clear the interrupts for the host controller */
4195 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4196 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4198 /* Put in max timeout */
4199 mci_writel(host, TMOUT, 0xFFFFFFFF);
4202 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
4203 * Tx Mark = fifo_size / 2 DMA Size = 8
4205 if (!host->pdata->fifo_depth) {
4207 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
4208 * have been overwritten by the bootloader, just like we're
4209 * about to do, so if you know the value for your hardware, you
4210 * should put it in the platform data.
4212 fifo_size = mci_readl(host, FIFOTH);
4213 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
4215 fifo_size = host->pdata->fifo_depth;
4217 host->fifo_depth = fifo_size;
4219 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
4220 mci_writel(host, FIFOTH, host->fifoth_val);
4222 /* disable clock to CIU */
4223 mci_writel(host, CLKENA, 0);
4224 mci_writel(host, CLKSRC, 0);
4226 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
/* Single-threaded ordered workqueue: card events must serialize */
4227 host->card_workqueue = alloc_workqueue("dw-mci-card",
4228 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
4229 if (!host->card_workqueue) {
4233 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
4234 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
4235 host->irq_flags, "dw-mci", host);
4239 if (host->pdata->num_slots)
4240 host->num_slots = host->pdata->num_slots;
4242 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
4244 /* We need at least one slot to succeed */
4245 for (i = 0; i < host->num_slots; i++) {
4246 ret = dw_mci_init_slot(host, i);
4248 dev_dbg(host->dev, "slot %d init failed\n", i);
4254 * Enable interrupts for command done, data over, data empty, card det,
4255 * receive ready and error such as transmit, receive timeout, crc error
4257 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4258 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
4259 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect interrupt only for removable SD-style slots */
4260 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
4261 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
4262 regs |= SDMMC_INT_CD;
4264 mci_writel(host, INTMASK, regs);
4266 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4268 dev_info(host->dev, "DW MMC controller at irq %d, "
4269 "%d bit host data width, "
4271 host->irq, width, fifo_size);
4274 dev_info(host->dev, "%d slots initialized\n", init_slots);
4276 dev_dbg(host->dev, "attempted to initialize %d slots, "
4277 "but failed on all\n", host->num_slots);
4282 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4283 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* --- error unwind (labels live in gapped lines) --- */
4288 destroy_workqueue(host->card_workqueue);
4291 if (host->use_dma && host->dma_ops->exit)
4292 host->dma_ops->exit(host);
4295 regulator_disable(host->vmmc);
4296 regulator_put(host->vmmc);
4300 if (!IS_ERR(host->clk_mmc))
4301 clk_disable_unprepare(host->clk_mmc);
4303 if (!IS_ERR(host->hclk_mmc))
4304 clk_disable_unprepare(host->hclk_mmc);
4307 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - undo dw_mci_probe on driver removal.
 * @host: controller state
 *
 * Masks and clears all interrupts, tears down every slot, gates the
 * CIU clock, destroys the card workqueue, unregisters the PM notifier
 * (SD slots only), exits the DMA backend, frees the card-detect GPIO
 * IRQ, and releases the regulator and all three clocks.
 */
4309 void dw_mci_remove(struct dw_mci *host)
4311 struct mmc_host *mmc = host->mmc;
4312 struct dw_mci_slot *slot = mmc_priv(mmc);
4315 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4316 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4318 for(i = 0; i < host->num_slots; i++){
4319 dev_dbg(host->dev, "remove slot %d\n", i);
4321 dw_mci_cleanup_slot(host->slot[i], i);
4324 /* disable clock to CIU */
4325 mci_writel(host, CLKENA, 0);
4326 mci_writel(host, CLKSRC, 0);
4328 destroy_workqueue(host->card_workqueue);
4329 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4330 unregister_pm_notifier(&host->mmc->pm_notify);
4332 if (host->use_dma && host->dma_ops->exit)
4333 host->dma_ops->exit(host);
4335 if (gpio_is_valid(slot->cd_gpio))
4336 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4339 regulator_disable(host->vmmc);
4340 regulator_put(host->vmmc);
4342 if (!IS_ERR(host->clk_mmc))
4343 clk_disable_unprepare(host->clk_mmc);
4345 if (!IS_ERR(host->hclk_mmc))
4346 clk_disable_unprepare(host->hclk_mmc);
4347 if (!IS_ERR(host->hpclk_mmc))
4348 clk_disable_unprepare(host->hpclk_mmc);
4350 EXPORT_SYMBOL(dw_mci_remove);
4354 #ifdef CONFIG_PM_SLEEP
4356 * TODO: we should probably disable the clock to the card in the suspend path.
/* Provided by the Rockchip WLAN glue; identifies the SDIO Wi-Fi chip */
4358 extern int get_wifi_chip_type(void);
/*
 * dw_mci_suspend - system-suspend hook.
 * @host: controller state
 *
 * Skipped entirely for SDIO hosts driving ESP8089 or post-AP6XXX Wi-Fi
 * chips (their own drivers manage power).  Otherwise: drop vmmc, and
 * for SD slots disable the controller IRQ, switch pins to the idle
 * state, and (except on rk3126/rk3126b/rk3036, which are already in
 * gpio_cd mode) rearm card detect as a GPIO wake source.  Finally
 * masks all controller interrupts and runs the power-down register
 * snapshot via rst_ops->pre_suspend.
 */
4359 int dw_mci_suspend(struct dw_mci *host)
4361 int present = dw_mci_get_cd(host->mmc);
4363 if((host->mmc->restrict_caps &
4364 RESTRICT_CARD_TYPE_SDIO) &&
4365 (get_wifi_chip_type() == WIFI_ESP8089 ||
4366 get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
4370 regulator_disable(host->vmmc);
4372 /* Only for sdmmc controller */
4373 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4374 disable_irq(host->irq);
4376 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4377 MMC_DBG_ERR_FUNC(host->mmc,
4378 "Idle pinctrl setting failed! [%s]",
4379 mmc_hostname(host->mmc));
4382 /* Soc rk3126/3036 already in gpio_cd mode */
4383 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4384 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4385 enable_irq_wake(host->mmc->slot.cd_irq);
4389 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4390 mci_writel(host, INTMASK, 0x00);
4391 mci_writel(host, CTRL, 0x00);
4393 if (host->rst_ops &&
4394 host->rst_ops->pre_suspend)
4395 host->rst_ops->pre_suspend(host);
4399 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - system-resume hook; mirrors dw_mci_suspend.
 * @host: controller state
 *
 * Restores the power-down register snapshot first, skips the same
 * Wi-Fi SDIO cases as suspend, restores pinctrl/card-detect state for
 * SD slots, re-enables vmmc, resets the controller, re-inits DMA
 * (except rk3036/rk312x whose external DMAC needs no re-init here),
 * reprograms FIFOTH/TMOUT/INTMASK, and replays ios/bus setup for any
 * slot with MMC_PM_KEEP_POWER.  Returns 0 or a negative errno (exact
 * returns sit in gapped lines).
 */
4401 int dw_mci_resume(struct dw_mci *host)
4405 struct dw_mci_slot *slot;
4406 int present = dw_mci_get_cd(host->mmc);
4408 if (host->rst_ops &&
4409 host->rst_ops->post_resume)
4410 host->rst_ops->post_resume(host);
4413 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4414 (get_wifi_chip_type() == WIFI_ESP8089 ||
4415 get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
4418 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
4419 slot = mmc_priv(host->mmc);
4420 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4424 /*only for sdmmc controller*/
4425 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4426 /* Soc rk3126/3036 already in gpio_cd mode */
4427 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4428 disable_irq_wake(host->mmc->slot.cd_irq);
4429 mmc_gpio_free_cd(host->mmc);
/*
 * With a debug-UART pin group present, go through idle state before
 * selecting udbg; otherwise restore the default pin state.
 */
4433 if (!IS_ERR(host->pins_udbg)) {
4434 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4435 MMC_DBG_ERR_FUNC(host->mmc,
4436 "Idle pinctrl setting failed! [%s]",
4437 mmc_hostname(host->mmc));
4438 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
4439 MMC_DBG_ERR_FUNC(host->mmc,
4440 "Udbg pinctrl setting failed! [%s]",
4441 mmc_hostname(host->mmc));
4443 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4444 MMC_DBG_ERR_FUNC(host->mmc,
4445 "Default pinctrl setting failed! [%s]",
4446 mmc_hostname(host->mmc));
4449 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4450 MMC_DBG_ERR_FUNC(host->mmc,
4451 "Default pinctrl setting failed! [%s]",
4452 mmc_hostname(host->mmc));
/* Per-SoC GRF bit: route card detect back to the controller mux */
4457 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4458 else if(cpu_is_rk3036())
4459 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4460 else if(cpu_is_rk312x())
4461 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4462 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4465 ret = regulator_enable(host->vmmc);
4468 "failed to enable regulator: %d\n", ret);
4473 if(!dw_mci_ctrl_all_reset(host)){
4478 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
4479 if(host->use_dma && host->dma_ops->init)
4480 host->dma_ops->init(host);
4483 * Restore the initial value at FIFOTH register
4484 * And Invalidate the prev_blksz with zero
4486 mci_writel(host, FIFOTH, host->fifoth_val);
4487 host->prev_blksz = 0;
4488 /* Put in max timeout */
4489 mci_writel(host, TMOUT, 0xFFFFFFFF);
4491 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4492 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR |
4493 SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
4495 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4496 regs |= SDMMC_INT_CD;
4498 mci_writel(host, INTMASK, regs);
4499 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4501 /*only for sdmmc controller*/
4502 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)){
4503 enable_irq(host->irq);
4506 for(i = 0; i < host->num_slots; i++){
4507 struct dw_mci_slot *slot = host->slot[i];
/* Card kept powered across suspend: replay ios and bus setup */
4510 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4511 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4512 dw_mci_setup_bus(slot, true);
4518 EXPORT_SYMBOL(dw_mci_resume);
4519 #endif /* CONFIG_PM_SLEEP */
/* Module init: banner only; platform glue registers the actual driver */
4521 static int __init dw_mci_init(void)
4523 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
/* Module exit: nothing to undo here (body in gapped lines, if any) */
4527 static void __exit dw_mci_exit(void)
4531 module_init(dw_mci_init);
4532 module_exit(dw_mci_exit);
4534 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4535 MODULE_AUTHOR("NXP Semiconductor VietNam");
4536 MODULE_AUTHOR("Imagination Technologies Ltd");
4537 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4538 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4539 MODULE_LICENSE("GPL v2");