2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/err.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/ioport.h>
25 #include <linux/module.h>
26 #include <linux/platform_device.h>
27 #include <linux/seq_file.h>
28 #include <linux/slab.h>
29 #include <linux/stat.h>
30 #include <linux/delay.h>
31 #include <linux/irq.h>
32 #include <linux/mmc/host.h>
33 #include <linux/mmc/mmc.h>
34 #include <linux/mmc/sdio.h>
35 #include <linux/mmc/rk_mmc.h>
36 #include <linux/bitops.h>
37 #include <linux/regulator/consumer.h>
38 #include <linux/workqueue.h>
40 #include <linux/of_gpio.h>
41 #include <linux/mmc/slot-gpio.h>
44 #include "rk_sdmmc_of.h"
46 /* Common flag combinations */
47 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
48 SDMMC_INT_HTO | SDMMC_INT_SBE | \
50 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
52 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
53 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
54 #define DW_MCI_SEND_STATUS 1
55 #define DW_MCI_RECV_STATUS 2
56 #define DW_MCI_DMA_THRESHOLD 16
/* Host clock limits (Hz); MAX was lowered from 200 MHz to 50 MHz for this SoC. */
#define DW_MCI_FREQ_MAX	50000000	/* unit: Hz */
#define DW_MCI_FREQ_MIN	300000		/* unit: Hz */

/*
 * Data-busy timeouts in ms. The SD spec caps write busy at 250 ms; the SD
 * value is padded to tolerate marginal ("sick") cards.
 * Note: the SD define previously ended in a stray ';' which would be
 * pasted into every expansion site — removed.
 */
#define SDMMC_DATA_TIMEOUT_SD	500
#define SDMMC_DATA_TIMEOUT_SDIO	250
#define SDMMC_DATA_TIMEOUT_EMMC	2500
#ifdef CONFIG_MMC_DW_IDMAC
/* IDMAC status bits acknowledged/cleared at init (tail of list not in view). */
#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
	SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
	SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
	u32 des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)	/* disable interrupt on completion */
#define IDMAC_DES0_LD	BIT(2)	/* last descriptor of a transfer */
#define IDMAC_DES0_FD	BIT(3)	/* first descriptor of a transfer */
#define IDMAC_DES0_CH	BIT(4)	/* second address is next-descriptor chain */
#define IDMAC_DES0_ER	BIT(5)	/* end of descriptor ring */
#define IDMAC_DES0_CES	BIT(30)	/* card error summary */
#define IDMAC_DES0_OWN	BIT(31)	/* descriptor owned by the IDMAC */
	u32 des1;	/* Buffer sizes */
/* Store byte count 's' (max 0x1fff) into the buffer-1 size field of des1. */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
	u32 des2;	/* buffer 1 physical address */
	u32 des3;	/* buffer 2 physical address (or next descriptor when chained) */
#endif /* CONFIG_MMC_DW_IDMAC */
/*
 * CMD19/CMD21 tuning block pattern for a 4-bit bus, per the SD/eMMC specs.
 * Compared byte-for-byte against the data returned during execute_tuning.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/*
 * Tuning block pattern for an 8-bit bus (eMMC HS200), per the JEDEC spec.
 * Twice the length of the 4-bit pattern; same usage as above.
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Dump every controller register (name, offset, live value) to the kernel log. */
static int dw_mci_regs_printk(struct dw_mci *host)
	/* dw_mci_regs is a table of {name, addr} entries, terminated by name == NULL. */
	struct sdmmc_reg *regs = dw_mci_regs;

	while( regs->name != 0 ){
		printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
	printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
135 #if defined(CONFIG_DEBUG_FS)
136 static int dw_mci_req_show(struct seq_file *s, void *v)
138 struct dw_mci_slot *slot = s->private;
139 struct mmc_request *mrq;
140 struct mmc_command *cmd;
141 struct mmc_command *stop;
142 struct mmc_data *data;
144 /* Make sure we get a consistent snapshot */
145 spin_lock_bh(&slot->host->lock);
155 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
156 cmd->opcode, cmd->arg, cmd->flags,
157 cmd->resp[0], cmd->resp[1], cmd->resp[2],
158 cmd->resp[2], cmd->error);
160 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
161 data->bytes_xfered, data->blocks,
162 data->blksz, data->flags, data->error);
165 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
166 stop->opcode, stop->arg, stop->flags,
167 stop->resp[0], stop->resp[1], stop->resp[2],
168 stop->resp[2], stop->error);
171 spin_unlock_bh(&slot->host->lock);
/* debugfs open hook: bind dw_mci_req_show to the slot stored in i_private. */
static int dw_mci_req_open(struct inode *inode, struct file *file)
	return single_open(file, dw_mci_req_show, inode->i_private);
/* File operations for the per-slot debugfs "req" entry. */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.release	= single_release,
189 static int dw_mci_regs_show(struct seq_file *s, void *v)
191 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
192 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
193 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
194 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
195 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
196 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open hook: bind dw_mci_regs_show to the host stored in i_private. */
static int dw_mci_regs_open(struct inode *inode, struct file *file)
	return single_open(file, dw_mci_regs_show, inode->i_private);
/* File operations for the per-host debugfs "regs" entry. */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.release	= single_release,
/* Create the per-slot debugfs tree under the mmc core's debugfs root. */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;

	root = mmc->debugfs_root;

	/* "regs" is per-host; "req" is per-slot. */
	node = debugfs_create_file("regs", S_IRUSR, root, host,
	node = debugfs_create_file("req", S_IRUSR, root, slot,
	/* Raw state machine word and event bitmasks, exposed read-only. */
	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);

	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
254 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the hardware data/response timeout counter to its maximum. */
static void dw_mci_set_timeout(struct dw_mci *host)
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMD register flags.
 * Returns the raw CMDR value (start bit added later by the caller).
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
	struct mmc_data *data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;

	/* Mark in-flight; overwritten on completion. */
	cmd->error = -EINPROGRESS;

	/* cmdr holds the opcode at this point (assignment not in view). */
	if (cmdr == MMC_STOP_TRANSMISSION)
		cmdr |= SDMMC_CMD_STOP;
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

		/* Data phase: direction and stream/block mode. */
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;

	/* Give the platform glue a chance to tweak the flags. */
	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);
302 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
304 struct mmc_command *stop;
310 stop = &host->stop_abort;
312 memset(stop, 0, sizeof(struct mmc_command));
314 if (cmdr == MMC_READ_SINGLE_BLOCK ||
315 cmdr == MMC_READ_MULTIPLE_BLOCK ||
316 cmdr == MMC_WRITE_BLOCK ||
317 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
318 stop->opcode = MMC_STOP_TRANSMISSION;
320 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
321 } else if (cmdr == SD_IO_RW_EXTENDED) {
322 stop->opcode = SD_IO_RW_DIRECT;
323 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
324 ((cmd->arg >> 28) & 0x7);
325 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
330 cmdr = stop->opcode | SDMMC_CMD_STOP |
331 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/* Write argument and command registers and kick off the command (start bit set). */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
	"start command: ARGR=0x%08x CMDR=0x%08x\n",
	cmd->arg, cmd_flags);
	mci_writel(host, CMDARG, cmd->arg);

	MMC_DBG_INFO_FUNC(host->mmc,"%d..%s start cmd=%d, arg=0x%x[%s]",__LINE__, __FUNCTION__,cmd->opcode, cmd->arg,mmc_hostname(host->mmc));
	//dw_mci_regs_printk(host);

	/* Some SoCs (e.g. RK3188) require the hold register on every command. */
	if(host->mmc->hold_reg_flag)
		cmd_flags |= SDMMC_CMD_USE_HOLD_REG;

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the pre-computed stop command attached to this data transfer. */
static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
	dw_mci_start_command(host, data->stop, host->stop_cmdr);
/* DMA interface functions */

/*
 * Abort an in-progress DMA transfer (PIO transfers are stopped by the
 * interrupt handler instead) and flag the transfer as complete.
 */
static void dw_mci_stop_dma(struct dw_mci *host)
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
372 static int dw_mci_get_dma_dir(struct mmc_data *data)
374 if (data->flags & MMC_DATA_WRITE)
375 return DMA_TO_DEVICE;
377 return DMA_FROM_DEVICE;
#ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the scatterlist after a DMA transfer — but only if it was not
 * pre-mapped via pre_req (host_cookie set), in which case post_req owns it.
 */
static void dw_mci_dma_cleanup(struct dw_mci *host)
	struct mmc_data *data = host->data;

	if (!data->host_cookie)
		dma_unmap_sg(host->dev,
			dw_mci_get_dma_dir(data));
393 static void dw_mci_idmac_reset(struct dw_mci *host)
395 u32 bmod = mci_readl(host, BMOD);
396 /* Software reset of DMA */
397 bmod |= SDMMC_IDMAC_SWRESET;
398 mci_writel(host, BMOD, bmod);
/* Detach the IDMAC from the data path and halt + soft-reset the DMA engine. */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
/* IDMAC completion: unmap buffers, mark the transfer done, run the tasklet. */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");
	// MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
	// host->mrq->cmd->opcode,host->mrq->cmd->arg,data->blocks,data->blksz,mmc_hostname(host->mmc));

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the (already DMA-mapped) scatterlist:
 * one chained descriptor per segment, then mark first/last descriptors.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;

	/* Set first descriptor */
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	/* NOTE(review): byte arithmetic here assumes host->sg_cpu is void * — confirm. */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;
/* Build descriptors for sg_len segments, enable the IDMAC, and poke it to run. */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running: any write to PLDMND wakes the descriptor poller. */
	mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: link one page worth of descriptors into a ring,
 * reset the engine, program interrupt masks and the ring base address.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
	struct idmac_desc *p;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
/* DMA backend vtable for the internal DMA controller. */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init		= dw_mci_idmac_init,
	.start		= dw_mci_idmac_start_dma,
	.stop		= dw_mci_idmac_stop_dma,
	.complete	= dw_mci_idmac_complete_dma,
	.cleanup	= dw_mci_dma_cleanup,
#endif /* CONFIG_MMC_DW_IDMAC */
/*
 * DMA-map the request's scatterlist if the transfer is DMA-eligible.
 * 'next' != 0 means this is the asynchronous pre_req path (result cached
 * in data->host_cookie). Returns the mapped segment count, or a negative
 * value if the transfer must fall back to PIO.
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
	struct scatterlist *sg;
	unsigned int i, sg_len;

	/* Already mapped by a previous pre_req call. */
	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)

	/* Every segment must be 32-bit aligned in offset and length. */
	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)

	sg_len = dma_map_sg(host->dev,
		dw_mci_get_dma_dir(data));

	data->host_cookie = sg_len;
/* mmc_host_ops.pre_req: map the next request's buffers ahead of time. */
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)

	/* A non-zero cookie here means the core misused the API; reset it. */
	if (data->host_cookie) {
		data->host_cookie = 0;

	/* On mapping failure, clear the cookie so the request falls back to PIO. */
	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
/* mmc_host_ops.post_req: unmap buffers that pre_req mapped (cookie set). */
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
/*
 * Pick the largest DMA burst size (MSIZE) and matching RX/TX watermarks
 * that evenly divide both the block size and the TX headroom, then
 * program FIFOTH. Falls back to the initial values when nothing fits.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	/* Candidate burst sizes supported by the controller, in FIFO words. */
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * Bursting is pointless
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {

	/* Try burst sizes from largest to smallest. */
	if (!((blksz_depth % mszs[idx]) ||
	      (tx_wmark_invers % mszs[idx]))) {
		rx_wmark = mszs[idx] - 1;

	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are uesed
	 */
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
/*
 * Configure the card-read threshold: for HS200/SDR104 reads, make the
 * controller wait until 'thld_size' bytes fit in the FIFO before starting
 * a read, avoiding mid-block clock stretching. Disabled otherwise.
 */
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	/* Threshold only applies to the fastest timings. */
	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));

	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to start the data phase via DMA. Returns non-zero when the transfer
 * is not DMA-eligible, in which case the caller falls back to PIO.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
	/* If we don't have a channel, we can't do DMA */

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	host->dma_ops->stop(host);

	"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
	(unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);
/*
 * Start the data phase of a request: record direction, set the read
 * threshold, and hand off to DMA — or fall back to PIO (sg_miter +
 * RX/TX interrupts) when DMA is not possible.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
	data->error = -EINPROGRESS;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
		host->dir_status = DW_MCI_SEND_STATUS;

	MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
		data->blocks, data->blksz, mmc_hostname(host->mmc));

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback path. */
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);

		host->part_buf_start = 0;
		host->part_buf_count = 0;

		/* Clear stale FIFO interrupts, then enable RX/TX for PIO. */
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transfered by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;

		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
/*
 * Issue an internal (non-card) controller command, e.g. a clock update,
 * and busy-poll up to 500 ms for the start bit to self-clear.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	/* Hardware clears SDMMC_CMD_START when the command has been accepted. */
	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
/*
 * Program the card clock divider, clock enable (with low-power gating
 * unless SDIO IRQs are in use) and bus width for this slot. Each register
 * change is latched into the controller with an UPD_CLK internal command.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;

		/* clock == 0: gate the card clock entirely. */
		mci_writel(host, CLKENA, 0);
			SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
		/* CLKDIV divides by 2*div; div == 0 means bus_hz passthrough. */
		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

			SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

			SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

			SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* remember the requested clock with the divider folded in */
		slot->__clk_old = clock << div;

	host->current_speed = clock;

	if(slot->ctype != slot->pre_ctype)
		MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]", \
			div ? ((host->bus_hz / div) >> 1):host->bus_hz, \
			(slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits", mmc_hostname(host->mmc));
	slot->pre_ctype = slot->ctype;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
/*
 * Poll the STATUS register until both the data-busy and MC-busy bits
 * clear, with a card-type-dependent millisecond timeout. Returns with
 * the controller possibly still busy if the timeout expires (no error).
 */
static void dw_mci_wait_unbusy(struct dw_mci *host)
	unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
	unsigned long time_loop;

	MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));

	/* Pick the timeout from the slot's restricted card type. */
	if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
		timeout = SDMMC_DATA_TIMEOUT_EMMC;
	else if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
		timeout = SDMMC_DATA_TIMEOUT_SD;

	time_loop = jiffies + msecs_to_jiffies(timeout);
		status = mci_readl(host, STATUS);
		if (!(status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
		//MMC_DBG_INFO_FUNC("dw_mci_wait_unbusy, waiting for......");
	} while (time_before(jiffies, time_loop));
/*
 * Start one command of a request on the given slot: wait for the bus to
 * go idle, reset per-request state, program data registers when a data
 * phase exists, then issue the command. Pre-computes the stop command.
 * Caller holds host->lock.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
	struct mmc_request *mrq;
	struct mmc_data *data;

	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	host->cur_slot = slot;

#if 0 //add by xbw,at 2014-03-12
	/*clean FIFO if it is a new request*/
	if(!(mrq->cmd->opcode & SDMMC_CMD_STOP)) {
		MMC_DBG_INFO_FUNC("%d..%s: reset the ctrl.", __LINE__, __FUNCTION__);
		mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
			SDMMC_CTRL_DMA_RESET));

	dw_mci_wait_unbusy(host);

	/* Fresh per-request bookkeeping. */
	host->pending_events = 0;
	host->completed_events = 0;
	host->data_status = 0;

	/* Data phase: program timeout, total byte count and block size. */
	dw_mci_set_timeout(host);
	mci_writel(host, BYTCNT, data->blksz*data->blocks);
	mci_writel(host, BLKSIZ, data->blksz);

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	dw_mci_submit_data(host, data);

	dw_mci_start_command(host, cmd, cmdflags);

	/* Pre-compute the stop command so the ISR can fire it immediately. */
	host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/* Kick off a queued request, preferring the SET_BLOCK_COUNT (sbc) command. */
static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
		mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));

	/* CMD23 (sbc) must precede the data command when present. */
	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
/* must be called with host->lock held */
/* Start the request now if the host is idle, otherwise queue the slot. */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
		list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops.request: entry point from the MMC core. Fails fast with
 * -ENOMEDIUM when no card is present; presence check and enqueue happen
 * atomically under host->lock.
 */
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;

		MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_reqeust--reqeuest done, cmd=%d [%s]",mrq->cmd->opcode, mmc_hostname(host->mmc));

		mmc_request_done(mmc, mrq);

	MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
		mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.set_ios: apply bus width, DDR timing, clock and power
 * settings requested by the MMC core, delegating SoC specifics to
 * drv_data->set_ios and clocking to dw_mci_setup_bus().
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
		slot->pre_ctype = SDMMC_CTYPE_1BIT;

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode is a per-slot bit in the upper half of UHS_REG. */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= ((0x1 << slot->id) << 16);
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);

		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);

		/* Power down slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, 0);
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops.get_ro: report write-protect state, trying in order the
 * no-WP quirk, a platform callback, a WP GPIO, then the WRTPRT register.
 */
static int dw_mci_get_ro(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
	else if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else if (gpio_is_valid(slot->wp_gpio))
		read_only = gpio_get_value(slot->wp_gpio);
		mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");
/*
 * mmc_host_ops.get_cd: report card presence. SDIO slots are forced
 * present; otherwise try the broken-CD quirk, a platform callback, a CD
 * GPIO, then the CDETECT register. Updates DW_MMC_CARD_PRESENT under lock.
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* SDIO devices are soldered down: always present. */
	if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO){
		spin_lock_bh(&host->lock);
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		spin_unlock_bh(&host->lock);

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else if (!IS_ERR_VALUE(gpio_cd))
		/* CDETECT is active-low. */
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))

	spin_lock_bh(&host->lock);
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);
/* mmc_host_ops.hw_reset: pulse the eMMC RST_n line per the spec timings. */
static void dw_mci_hw_reset(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);

	/*
	 * According to eMMC spec
	 * tRstW >= 1us ; RST_n pulse width
	 * tRSCA >= 200us ; RST_n to Command time
	 * tRSTH >= 1us ; RST_n high period
	 */
	mci_writel(slot->host, RST_n, 0x1);

	udelay(10); /* 10us margin for poor-quality eMMC parts */

	mci_writel(slot->host, RST_n, 0x0);

	usleep_range(300, 1000); /* at least 300us (spec requires > 200us) */
1158 * Disable lower power mode.
1160 * Low power mode will stop the card clock when idle. According to the
1161 * description of the CLKENA register we should disable low power mode
1162 * for SDIO cards if we need SDIO interrupts to work.
1164 * This function is fast if low power mode is already disabled.
/*
 * Clear this slot's low-power bit in CLKENA (it would gate the card clock
 * when idle, which breaks SDIO interrupt delivery) and latch the change
 * with a clock-update command. No-op if already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
	struct dw_mci *host = slot->host;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
/* mmc_host_ops.enable_sdio_irq: (un)mask this slot's SDIO interrupt bit. */
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);

	/*
	 * Turn off low power mode if it was enabled. This is a bit of
	 * a heavy operation and we disable / enable IRQs a lot, so
	 * we'll leave low power mode disabled and it will get
	 * re-enabled again in dw_mci_setup_bus().
	 */
	dw_mci_disable_low_power(slot);

	mci_writel(host, INTMASK,
		   (int_mask | SDMMC_INT_SDIO(slot->id)));
	mci_writel(host, INTMASK,
		   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
/*
 * mmc_host_ops.execute_tuning: select the reference tuning pattern for
 * the opcode/bus-width combination and delegate the actual sample-phase
 * sweep to the SoC-specific drv_data->execute_tuning hook.
 */
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct dw_mci_tuning_data tuning_data;

	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
		"Undefined command(%d) for tuning\n", opcode);

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/* Host operations exposed to the MMC core for each slot. */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.hw_reset		= dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
/*
 * Finish the data phase of a request: for writes (except the CMD14 bus
 * test, which violates busy rules by design) translate latched data
 * error bits into errno values, then wait for the card to go non-busy.
 */
static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
	if(DW_MCI_SEND_STATUS == host->dir_status){
		if( MMC_BUS_TEST_W != host->cmd->opcode){
			if(host->data_status & SDMMC_INT_DCRC)
				host->data->error = -EILSEQ;
			else if(host->data_status & SDMMC_INT_EBE)
				host->data->error = -ETIMEDOUT;
			dw_mci_wait_unbusy(host);
		dw_mci_wait_unbusy(host);
/*
 * Complete the current request: run end-of-data handling, start the next
 * queued slot (or go idle), and report completion to the MMC core with
 * host->lock dropped around mmc_request_done() to avoid recursion.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	dw_mci_deal_data_end(host, mrq);

	MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
		mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));

	MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
		mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));

	host->cur_slot->mrq = NULL;

	/* Dispatch the next waiting slot, if any. */
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
		dw_mci_start_request(host, slot);
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;

	/* Drop the lock while notifying the core; it may re-enter ->request. */
	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
/*
 * Read back the command response registers and translate the latched
 * cmd_status interrupt bits into cmd->error.
 * Long (136-bit) responses are read RESP0..RESP3 into resp[3]..resp[0]
 * (the controller stores the most significant word in RESP3).
 * NOTE(review): listing is sampled — lines are missing from this body.
 */
1310 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1312 	u32 status = host->cmd_status;
1314 	host->cmd_status = 0;
1316 	/* Read the response from the card (up to 16 bytes) */
1317 	if (cmd->flags & MMC_RSP_PRESENT) {
1318 		if (cmd->flags & MMC_RSP_136) {
1319 			cmd->resp[3] = mci_readl(host, RESP0);
1320 			cmd->resp[2] = mci_readl(host, RESP1);
1321 			cmd->resp[1] = mci_readl(host, RESP2);
1322 			cmd->resp[0] = mci_readl(host, RESP3);
1324             MMC_DBG_INFO_FUNC(host->mmc," command complete [%s], \ncmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x", \
1325                     mmc_hostname(host->mmc), cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0]);
1327 			cmd->resp[0] = mci_readl(host, RESP0);
1331            MMC_DBG_INFO_FUNC(host->mmc, " command complete [%s], cmd=%d,resp[0]=0x%x",\
1332                     mmc_hostname(host->mmc),cmd->opcode, cmd->resp[0]);
	/* Map controller error bits to errno: RTO -> timeout, RCRC -> CRC. */
1336 	if (status & SDMMC_INT_RTO)
1337 		cmd->error = -ETIMEDOUT;
1338 	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1339 		cmd->error = -EILSEQ;
1340 	else if (status & SDMMC_INT_RESP_ERR)
1344 	MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=0x%x [%s]",cmd->opcode, cmd->error,mmc_hostname(host->mmc));
	/* CMD13 polling errors are routine; don't spam the error log for them. */
1347         if(MMC_SEND_STATUS != cmd->opcode)
1348             MMC_DBG_ERR_FUNC(host->mmc, " command complete, cmd=%d,cmdError=0x%x [%s]",\
1349                 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
1351 		/* newer ip versions need a delay between retries */
1352 		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine.  Driven by events set in the IRQ handler
 * (EVENT_CMD_COMPLETE, EVENT_XFER_COMPLETE, EVENT_DATA_COMPLETE,
 * EVENT_DATA_ERROR), it walks host->state through
 * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP / DATA_ERROR
 * until no further transition is possible, then stores the final state.
 * Runs with host->lock held for the whole walk.
 * NOTE(review): listing is sampled — many lines (including the switch
 * skeleton and break/goto statements) are missing from this body.
 */
1358 static void dw_mci_tasklet_func(unsigned long priv)
1360 	struct dw_mci *host = (struct dw_mci *)priv;
1361 	struct dw_mci_slot *slot = mmc_priv(host->mmc);
1362 	struct mmc_data	*data;
1363 	struct mmc_command *cmd;
1364 	enum dw_mci_state state;
1365 	enum dw_mci_state prev_state;
1368 	spin_lock(&host->lock);
1370 	state = host->state;
1380 		case STATE_SENDING_CMD:
1381 			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1382 						&host->pending_events))
1387 			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1388 			dw_mci_command_complete(host, cmd);
			/* SBC (CMD23) done: immediately issue the real data cmd. */
1389 			if (cmd == host->mrq->sbc && !cmd->error) {
1390 				prev_state = state = STATE_SENDING_CMD;
1391 				__dw_mci_start_request(host, host->cur_slot,
1396 			if (cmd->data && cmd->error) {
1397 				dw_mci_stop_dma(host);
1400 					send_stop_cmd(host, data);
1401 					state = STATE_SENDING_STOP;
1407 					send_stop_abort(host, data);
1408 					state = STATE_SENDING_STOP;
1414 			if (!host->mrq->data || cmd->error) {
1415 				dw_mci_request_end(host, host->mrq);
1419 			prev_state = state = STATE_SENDING_DATA;
1422 		case STATE_SENDING_DATA:
1423 			if (test_and_clear_bit(EVENT_DATA_ERROR,
1424 					       &host->pending_events)) {
1425 				dw_mci_stop_dma(host);
1428 					send_stop_cmd(host, data);
1430 					send_stop_abort(host, data);
1432 				state = STATE_DATA_ERROR;
1435             MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
1436                              prev_state,state, mmc_hostname(host->mmc));
1438 			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1439 						&host->pending_events))
1441             MMC_DBG_INFO_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
1442                               prev_state,state,mmc_hostname(host->mmc));
1444 			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1445 			prev_state = state = STATE_DATA_BUSY;
1448 		case STATE_DATA_BUSY:
1449 			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1450 						&host->pending_events))
1453                         dw_mci_deal_data_end(host, host->mrq);
1454             MMC_DBG_INFO_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
1455                               prev_state,state,mmc_hostname(host->mmc));
1458 			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1459 			status = host->data_status;
			/* Data phase finished with an error bit latched? */
1461 			if (status & DW_MCI_DATA_ERROR_FLAGS) {
1462 			    if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
1463                 MMC_DBG_ERR_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
1464                                 prev_state,state, status, mmc_hostname(host->mmc));
1466 				if (status & SDMMC_INT_DRTO) {
1467 					data->error = -ETIMEDOUT;
1468 				} else if (status & SDMMC_INT_DCRC) {
1469 					data->error = -EILSEQ;
1470 				} else if (status & SDMMC_INT_EBE &&
1472 							DW_MCI_SEND_STATUS) {
1474 					 * No data CRC status was returned.
1475 					 * The number of bytes transferred will
1476 					 * be exaggerated in PIO mode.
1478 					data->bytes_xfered = 0;
1479 					data->error = -ETIMEDOUT;
1488 				 * After an error, there may be data lingering
1489 				 * in the FIFO, so reset it - doing so
1490 				 * generates a block interrupt, hence setting
1491 				 * the scatter-gather pointer to NULL.
1493 				sg_miter_stop(&host->sg_miter);
1495 				ctrl = mci_readl(host, CTRL);
1496 				ctrl |= SDMMC_CTRL_FIFO_RESET;
1497 				mci_writel(host, CTRL, ctrl);
				/* Success path: report the full transfer length. */
1499 				data->bytes_xfered = data->blocks * data->blksz;
1504                 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
1505                                  prev_state,state,mmc_hostname(host->mmc));
1506 				dw_mci_request_end(host, host->mrq);
1509             MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
1510                              prev_state,state,mmc_hostname(host->mmc));
			/* With SBC pre-set, no explicit CMD12 stop is needed. */
1512 			if (host->mrq->sbc && !data->error) {
1513 				data->stop->error = 0;
1515             MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
1516                              prev_state,state,mmc_hostname(host->mmc));
1518 				dw_mci_request_end(host, host->mrq);
1522 			prev_state = state = STATE_SENDING_STOP;
1524 				send_stop_cmd(host, data);
1526 			if (data->stop && !data->error) {
1527 				/* stop command for open-ended transfer*/
1529 				send_stop_abort(host, data);
1533             MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
1534                              prev_state,state,mmc_hostname(host->mmc));
1536 		case STATE_SENDING_STOP:
1537 			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1538 						&host->pending_events))
1540             MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
1541                              prev_state,state,mmc_hostname(host->mmc))	;
1543 			/* CMD error in data command */
1544 			if (host->mrq->cmd->error && host->mrq->data) {
1545 				sg_miter_stop(&host->sg_miter);
1547 				ctrl = mci_readl(host, CTRL);
1548 				ctrl |= SDMMC_CTRL_FIFO_RESET;
1549 				mci_writel(host, CTRL, ctrl);
1555 				dw_mci_command_complete(host, host->mrq->stop);
1557 				if (host->mrq->stop)
1558 					dw_mci_command_complete(host, host->mrq->stop);
1560 				host->cmd_status = 0;
1563 			dw_mci_request_end(host, host->mrq);
1566 		case STATE_DATA_ERROR:
1567 			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1568 						&host->pending_events))
1571 			state = STATE_DATA_BUSY;
	/* Loop until the state machine reaches a fixed point. */
1574 	} while (state != prev_state);
1576 	host->state = state;
1578 	spin_unlock(&host->lock);
1582 /* push final bytes to part_buf, only use during push */
/* Stash the trailing cnt bytes (less than one FIFO word) for the next push. */
1583 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1585 	memcpy((void *)&host->part_buf, buf, cnt);
1586 	host->part_buf_count = cnt;
1589 /* append bytes to part_buf, only use during push */
/*
 * Append up to cnt bytes to part_buf, capped so part_buf never exceeds one
 * FIFO word (1 << data_shift bytes).  Returns the number of bytes consumed.
 */
1590 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1592 	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1593 	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1594 	host->part_buf_count += cnt;
1598 /* pull first bytes from part_buf, only use during pull */
/*
 * Drain up to cnt buffered bytes from part_buf into buf, advancing the
 * start offset.  Returns the number of bytes copied (possibly 0).
 */
1599 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1601 	cnt = min(cnt, (int)host->part_buf_count);
1603 		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1605 		host->part_buf_count -= cnt;
1606 		host->part_buf_start += cnt;
1611 /* pull final bytes from the part_buf, assuming it's just been filled */
/*
 * Take the first cnt bytes of a freshly read FIFO word from part_buf and
 * leave the remainder buffered for the next pull.
 */
1612 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1614 	memcpy(buf, &host->part_buf, cnt);
1615 	host->part_buf_start = cnt;
1616 	host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO write path for a 16-bit FIFO: flush any buffered partial word, bounce
 * unaligned buffers through an on-stack aligned buffer (when the arch lacks
 * efficient unaligned access), write whole u16 words, and buffer any trailing
 * odd byte — flushing it only once the expected data length is reached.
 * NOTE(review): listing is sampled — lines are missing from this body.
 */
1619 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1621 	struct mmc_data *data = host->data;
1624 	/* try and push anything in the part_buf */
1625 	if (unlikely(host->part_buf_count)) {
1626 		int len = dw_mci_push_part_bytes(host, buf, cnt);
1629 		if (host->part_buf_count == 2) {
1630 			mci_writew(host, DATA(host->data_offset),
1632 			host->part_buf_count = 0;
1635 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1636 	if (unlikely((unsigned long)buf & 0x1)) {
1638 			u16 aligned_buf[64];
1639 			int len = min(cnt & -2, (int)sizeof(aligned_buf));
1640 			int items = len >> 1;
1642 			/* memcpy from input buffer into aligned buffer */
1643 			memcpy(aligned_buf, buf, len);
1646 			/* push data from aligned buffer into fifo */
1647 			for (i = 0; i < items; ++i)
1648 				mci_writew(host, DATA(host->data_offset),
1655 		for (; cnt >= 2; cnt -= 2)
1656 			mci_writew(host, DATA(host->data_offset), *pdata++);
1659 	/* put anything remaining in the part_buf */
1661 		dw_mci_set_part_bytes(host, buf, cnt);
1662 		/* Push data if we have reached the expected data length */
1663 		if ((data->bytes_xfered + init_cnt) ==
1664 		    (data->blksz * data->blocks))
1665 			mci_writew(host, DATA(host->data_offset),
/*
 * PIO read path for a 16-bit FIFO: mirror of dw_mci_push_data16.  Reads whole
 * u16 words (via an aligned bounce buffer if needed), and for a trailing odd
 * byte reads one more word into part_buf16 and hands out just the bytes asked
 * for via dw_mci_pull_final_bytes().
 * NOTE(review): listing is sampled — lines are missing from this body.
 */
1670 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1672 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1673 	if (unlikely((unsigned long)buf & 0x1)) {
1675 			/* pull data from fifo into aligned buffer */
1676 			u16 aligned_buf[64];
1677 			int len = min(cnt & -2, (int)sizeof(aligned_buf));
1678 			int items = len >> 1;
1680 			for (i = 0; i < items; ++i)
1681 				aligned_buf[i] = mci_readw(host,
1682 						DATA(host->data_offset));
1683 			/* memcpy from aligned buffer into output buffer */
1684 			memcpy(buf, aligned_buf, len);
1692 		for (; cnt >= 2; cnt -= 2)
1693 			*pdata++ = mci_readw(host, DATA(host->data_offset));
1697 		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1698 		dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO write path for a 32-bit FIFO: same structure as dw_mci_push_data16 but
 * with 4-byte words — flush buffered partial word, bounce unaligned buffers,
 * write whole u32 words, buffer the trailing 1-3 bytes.
 * NOTE(review): listing is sampled — lines are missing from this body.
 */
1702 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1704 	struct mmc_data *data = host->data;
1707 	/* try and push anything in the part_buf */
1708 	if (unlikely(host->part_buf_count)) {
1709 		int len = dw_mci_push_part_bytes(host, buf, cnt);
1712 		if (host->part_buf_count == 4) {
1713 			mci_writel(host, DATA(host->data_offset),
1715 			host->part_buf_count = 0;
1718 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1719 	if (unlikely((unsigned long)buf & 0x3)) {
1721 			u32 aligned_buf[32];
1722 			int len = min(cnt & -4, (int)sizeof(aligned_buf));
1723 			int items = len >> 2;
1725 			/* memcpy from input buffer into aligned buffer */
1726 			memcpy(aligned_buf, buf, len);
1729 			/* push data from aligned buffer into fifo */
1730 			for (i = 0; i < items; ++i)
1731 				mci_writel(host, DATA(host->data_offset),
1738 		for (; cnt >= 4; cnt -= 4)
1739 			mci_writel(host, DATA(host->data_offset), *pdata++);
1742 	/* put anything remaining in the part_buf */
1744 		dw_mci_set_part_bytes(host, buf, cnt);
1745 		/* Push data if we have reached the expected data length */
1746 		if ((data->bytes_xfered + init_cnt) ==
1747 		    (data->blksz * data->blocks))
1748 			mci_writel(host, DATA(host->data_offset),
/*
 * PIO read path for a 32-bit FIFO: mirror of dw_mci_push_data32; trailing
 * 1-3 bytes come from one extra word latched into part_buf32.
 * NOTE(review): listing is sampled — lines are missing from this body.
 */
1753 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1755 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1756 	if (unlikely((unsigned long)buf & 0x3)) {
1758 			/* pull data from fifo into aligned buffer */
1759 			u32 aligned_buf[32];
1760 			int len = min(cnt & -4, (int)sizeof(aligned_buf));
1761 			int items = len >> 2;
1763 			for (i = 0; i < items; ++i)
1764 				aligned_buf[i] = mci_readl(host,
1765 						DATA(host->data_offset));
1766 			/* memcpy from aligned buffer into output buffer */
1767 			memcpy(buf, aligned_buf, len);
1775 		for (; cnt >= 4; cnt -= 4)
1776 			*pdata++ = mci_readl(host, DATA(host->data_offset));
1780 		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1781 		dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO write path for a 64-bit FIFO: same structure as the 16/32-bit variants
 * but with 8-byte words.
 * NOTE(review): listing is sampled — lines are missing from this body.
 */
1785 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1787 	struct mmc_data *data = host->data;
1790 	/* try and push anything in the part_buf */
1791 	if (unlikely(host->part_buf_count)) {
1792 		int len = dw_mci_push_part_bytes(host, buf, cnt);
1796 		if (host->part_buf_count == 8) {
1797 			mci_writeq(host, DATA(host->data_offset),
1799 			host->part_buf_count = 0;
1802 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1803 	if (unlikely((unsigned long)buf & 0x7)) {
1805 			u64 aligned_buf[16];
1806 			int len = min(cnt & -8, (int)sizeof(aligned_buf));
1807 			int items = len >> 3;
1809 			/* memcpy from input buffer into aligned buffer */
1810 			memcpy(aligned_buf, buf, len);
1813 			/* push data from aligned buffer into fifo */
1814 			for (i = 0; i < items; ++i)
1815 				mci_writeq(host, DATA(host->data_offset),
1822 		for (; cnt >= 8; cnt -= 8)
1823 			mci_writeq(host, DATA(host->data_offset), *pdata++);
1826 	/* put anything remaining in the part_buf */
1828 		dw_mci_set_part_bytes(host, buf, cnt);
1829 		/* Push data if we have reached the expected data length */
1830 		if ((data->bytes_xfered + init_cnt) ==
1831 		    (data->blksz * data->blocks))
1832 			mci_writeq(host, DATA(host->data_offset),
/*
 * PIO read path for a 64-bit FIFO: mirror of dw_mci_push_data64; trailing
 * 1-7 bytes come from one extra word latched into part_buf.
 * NOTE(review): listing is sampled — lines are missing from this body.
 */
1837 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1839 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1840 	if (unlikely((unsigned long)buf & 0x7)) {
1842 			/* pull data from fifo into aligned buffer */
1843 			u64 aligned_buf[16];
1844 			int len = min(cnt & -8, (int)sizeof(aligned_buf));
1845 			int items = len >> 3;
1847 			for (i = 0; i < items; ++i)
1848 				aligned_buf[i] = mci_readq(host,
1849 						DATA(host->data_offset));
1850 			/* memcpy from aligned buffer into output buffer */
1851 			memcpy(buf, aligned_buf, len);
1859 		for (; cnt >= 8; cnt -= 8)
1860 			*pdata++ = mci_readq(host, DATA(host->data_offset));
1864 		host->part_buf = mci_readq(host, DATA(host->data_offset));
1865 		dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Generic pull: first satisfy the request from buffered partial-word bytes,
 * then delegate the remainder to the FIFO-width-specific host->pull_data op.
 * NOTE(review): listing is sampled — lines are missing from this body.
 */
1869 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1873 	/* get remaining partial bytes */
1874 	len = dw_mci_pull_part_bytes(host, buf, cnt);
1875 	if (unlikely(len == cnt))
1880 	/* get the rest of the data */
1881 	host->pull_data(host, buf, cnt);
/*
 * PIO read loop: walk the request's scatterlist with sg_miter, draining as
 * many bytes as the FIFO currently holds (FCNT scaled by data_shift, plus
 * any buffered partial word) into each segment.  Re-reads while RXDR stays
 * asserted, and on a data-over interrupt (dto) also drains any residue.
 * Signals EVENT_XFER_COMPLETE when the transfer is done.
 * NOTE(review): listing is sampled — lines are missing from this body.
 */
1884 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1886 	struct sg_mapping_iter *sg_miter = &host->sg_miter;
1888 	unsigned int offset;
1889 	struct mmc_data	*data = host->data;
1890 	int shift = host->data_shift;
1893 	unsigned int remain, fcnt;
1896 		if (!sg_miter_next(sg_miter))
1899 		host->sg = sg_miter->piter.sg;
1900 		buf = sg_miter->addr;
1901 		remain = sg_miter->length;
		/* Bytes available = FIFO count scaled to bytes + buffered part. */
1905 			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1906 					<< shift) + host->part_buf_count;
1907 			len = min(remain, fcnt);
1910 			dw_mci_pull_data(host, (void *)(buf + offset), len);
1911 			data->bytes_xfered += len;
1916 		sg_miter->consumed = offset;
1917 		status = mci_readl(host, MINTSTS);
1918 		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1919 	/* if the RXDR is ready read again */
1920 	} while ((status & SDMMC_INT_RXDR) ||
1921 		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
1924 	if (!sg_miter_next(sg_miter))
1926 	sg_miter->consumed = 0;
1928 	sg_miter_stop(sg_miter);
1932 	sg_miter_stop(sg_miter);
1935 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO write loop: mirror of dw_mci_read_data_pio.  Fills the FIFO with as
 * much free space as it reports (fifo_depth - FCNT, scaled, minus buffered
 * partial-word bytes) from each scatterlist segment, repeating while TXDR
 * stays asserted.  Signals EVENT_XFER_COMPLETE when done.
 * NOTE(review): listing is sampled — lines are missing from this body.
 */
1938 static void dw_mci_write_data_pio(struct dw_mci *host)
1940 	struct sg_mapping_iter *sg_miter = &host->sg_miter;
1942 	unsigned int offset;
1943 	struct mmc_data	*data = host->data;
1944 	int shift = host->data_shift;
1947 	unsigned int fifo_depth = host->fifo_depth;
1948 	unsigned int remain, fcnt;
1951 		if (!sg_miter_next(sg_miter))
1954 		host->sg = sg_miter->piter.sg;
1955 		buf = sg_miter->addr;
1956 		remain = sg_miter->length;
		/* Free FIFO space in bytes, less what part_buf will consume. */
1960 			fcnt = ((fifo_depth -
1961 				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1962 					<< shift) - host->part_buf_count;
1963 			len = min(remain, fcnt);
1966 			host->push_data(host, (void *)(buf + offset), len);
1967 			data->bytes_xfered += len;
1972 		sg_miter->consumed = offset;
1973 		status = mci_readl(host, MINTSTS);
1974 		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1975 	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1978 	if (!sg_miter_next(sg_miter))
1980 	sg_miter->consumed = 0;
1982 	sg_miter_stop(sg_miter);
1986 	sg_miter_stop(sg_miter);
1989 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Command-done interrupt: latch the first status value (don't overwrite an
 * earlier error status), flag EVENT_CMD_COMPLETE and kick the tasklet.
 * NOTE(review): listing is sampled — lines are missing from this body.
 */
1992 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1994 	if (!host->cmd_status)
1995 		host->cmd_status = status;
1999 	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2000 	tasklet_schedule(&host->tasklet);
/*
 * Top-half IRQ handler.  Reads the masked interrupt status (MINTSTS), acks
 * each cause in RINTSTS, latches cmd/data status for the tasklet, services
 * PIO RX/TX thresholds inline, forwards card-detect to the workqueue and
 * SDIO interrupts to the MMC core, and (with internal DMA) handles IDMAC
 * completion interrupts.
 * NOTE(review): listing is sampled — lines (including the return path)
 * are missing from this body.
 */
2003 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2005 	struct dw_mci *host = dev_id;
2009 	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2012 	 * DTO fix - version 2.10a and below, and only if internal DMA
2015 	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2017 		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2018 			pending |= SDMMC_INT_DATA_OVER;
2022 		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2023 			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2024 			host->cmd_status = pending;
2026 			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2029 		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2030 			/* if there is an error report DATA_ERROR */
2031 			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2032 			host->data_status = pending;
2034 			set_bit(EVENT_DATA_ERROR, &host->pending_events);
2035 			tasklet_schedule(&host->tasklet);
2038 		if (pending & SDMMC_INT_DATA_OVER) {
2039 			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2040 			MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2041 			if (!host->data_status)
2042 				host->data_status = pending;
			/* On reads, drain anything still left in the FIFO. */
2044 			if (host->dir_status == DW_MCI_RECV_STATUS) {
2045 				if (host->sg != NULL)
2046 					dw_mci_read_data_pio(host, true);
2048 			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2049 			tasklet_schedule(&host->tasklet);
2052 		if (pending & SDMMC_INT_RXDR) {
2053 			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2054 			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2055 				dw_mci_read_data_pio(host, false);
2058 		if (pending & SDMMC_INT_TXDR) {
2059 			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2060 			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2061 				dw_mci_write_data_pio(host);
2064 		if (pending & SDMMC_INT_CMD_DONE) {
2065 			MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2066 			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2067 			dw_mci_cmd_interrupt(host, pending);
		/* Card insert/remove: defer debounce/teardown to process context. */
2070 		if (pending & SDMMC_INT_CD) {
2071 			mci_writel(host, RINTSTS, SDMMC_INT_CD);
2072 			queue_work(host->card_workqueue, &host->card_work);
2075 		/* Handle SDIO Interrupts */
2076 		for (i = 0; i < host->num_slots; i++) {
2077 			struct dw_mci_slot *slot = host->slot[i];
2078 			if (pending & SDMMC_INT_SDIO(i)) {
2079 				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
2080 				mmc_signal_sdio_irq(slot->mmc);
2086 #ifdef CONFIG_MMC_DW_IDMAC
2087 	/* Handle DMA interrupts */
2088 	pending = mci_readl(host, IDSTS);
2089 	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2090 		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2091 		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2092 		host->dma_ops->complete(host);
/*
 * Card-detect worker.  For each slot, compares the current presence against
 * last_detect_state; on a change it fails the in-flight request (and any
 * queued ones) with -ENOMEDIUM according to where the state machine was,
 * resets the FIFO/IDMAC, and finally notifies the MMC core via
 * mmc_detect_change() with the platform debounce delay.
 * NOTE(review): listing is sampled — lines (switch skeleton, list walks,
 * loop exits) are missing from this body.
 */
2099 static void dw_mci_work_routine_card(struct work_struct *work)
2101 	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2104 	for (i = 0; i < host->num_slots; i++) {
2105 		struct dw_mci_slot *slot = host->slot[i];
2106 		struct mmc_host *mmc = slot->mmc;
2107 		struct mmc_request *mrq;
2111 		present = dw_mci_get_cd(mmc);
2112 		while (present != slot->last_detect_state) {
2113 			dev_dbg(&slot->mmc->class_dev, "card %s\n",
2114 				present ? "inserted" : "removed");
2115 			MMC_DBG_BOOT_FUNC(mmc, "  The card is %s.  ===!!!!!!==[%s]\n",
2116 				present ? "inserted" : "removed", mmc_hostname(mmc));
2118 			spin_lock_bh(&host->lock);
2120 			/* Card change detected */
2121 			slot->last_detect_state = present;
2123 			/* Clean up queue if present */
2126 				if (mrq == host->mrq) {
					/* Fail the active request per current state. */
2130 					switch (host->state) {
2133 					case STATE_SENDING_CMD:
2134 						mrq->cmd->error = -ENOMEDIUM;
2138 					case STATE_SENDING_DATA:
2139 						mrq->data->error = -ENOMEDIUM;
2140 						dw_mci_stop_dma(host);
2142 					case STATE_DATA_BUSY:
2143 					case STATE_DATA_ERROR:
2144 						if (mrq->data->error == -EINPROGRESS)
2145 							mrq->data->error = -ENOMEDIUM;
2149 					case STATE_SENDING_STOP:
2150 						mrq->stop->error = -ENOMEDIUM;
2154 					dw_mci_request_end(host, mrq);
2156 					list_del(&slot->queue_node);
2157 					mrq->cmd->error = -ENOMEDIUM;
2159 						mrq->data->error = -ENOMEDIUM;
2161 						mrq->stop->error = -ENOMEDIUM;
2163                     MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",mrq->cmd->opcode, mmc_hostname(mmc));
2165 					spin_unlock(&host->lock);
2166 					mmc_request_done(slot->mmc, mrq);
2167 					spin_lock(&host->lock);
2171 			/* Power down slot */
2175 				 * Clear down the FIFO - doing so generates a
2176 				 * block interrupt, hence setting the
2177 				 * scatter-gather pointer to NULL.
2179 				sg_miter_stop(&host->sg_miter);
2182 				ctrl = mci_readl(host, CTRL);
2183 				ctrl |= SDMMC_CTRL_FIFO_RESET;
2184 				mci_writel(host, CTRL, ctrl);
2186 #ifdef CONFIG_MMC_DW_IDMAC
2187 				dw_mci_idmac_reset(host);
2192 			spin_unlock_bh(&host->lock);
			/* Re-sample: the card may have bounced while we worked. */
2194 			present = dw_mci_get_cd(mmc);
2197 		mmc_detect_change(slot->mmc,
2198 			msecs_to_jiffies(host->pdata->detect_delay_ms));
2203 /* given a slot id, find out the device node representing that slot */
/*
 * Walk the controller's DT children and return the one whose "reg" property
 * equals the slot index; NULL-ish fallthrough when dev or of_node is absent.
 * NOTE(review): listing is sampled — the return statements are missing here.
 */
2204 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2206 	struct device_node *np;
2210 	if (!dev || !dev->of_node)
2213 	for_each_child_of_node(dev->of_node, np) {
2214 		addr = of_get_property(np, "reg", &len);
2215 		if (!addr || (len < sizeof(int)))
2217 		if (be32_to_cpup(addr) == slot)
/* Per-slot DT property -> quirk-flag mapping table. */
2223 static struct dw_mci_of_slot_quirks {
2226 } of_slot_quirks[] = {
2228 		.quirk	= "disable-wp",
2229 		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Collect the quirk flags declared in the slot's DT node by scanning
 * of_slot_quirks[]; returns the OR of all matching quirk ids.
 * NOTE(review): listing is sampled — declarations/return are missing here.
 */
2233 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2235 	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2240 		for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2241 			if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2242 				quirks |= of_slot_quirks[idx].id;
2247 /* find out bus-width for a given slot */
/*
 * Read "bus-width" from the controller node (note: deliberately NOT the
 * per-slot child node — see the commented-out call).  Logs and presumably
 * falls back to a default width when the property is absent.
 */
2248 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2250 	struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
2256 	if (of_property_read_u32(np, "bus-width", &bus_wd))
2257 		dev_err(dev, "bus-width property not found, assuming width"
2263 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Look up the optional "pwr-gpios" entry on the controller node, request it
 * as "dw-mci-pwr_en" and drive it low (power enable asserted).  Missing or
 * unrequestable GPIOs are tolerated.
 * NOTE(review): listing is sampled — return statements are missing here.
 */
2264 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
2266 	struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
2272 	gpio = of_get_named_gpio(np, "pwr-gpios", 0);
2274 	/* Having a missing entry is valid; return silently */
2275 	if (!gpio_is_valid(gpio))
2278 	if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
2279 		dev_warn(dev, "gpio [%d] request failed\n", gpio);
2283         gpio_direction_output(gpio, 0);//set 0 to pwr-en
2289 /* find the write protect gpio for a given slot; or -1 if none specified */
/*
 * Look up the optional per-slot "wp-gpios" entry and request it as
 * "dw-mci-wp".  Missing entries are valid and silently ignored.
 * NOTE(review): listing is sampled — return statements are missing here.
 */
2290 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2292 	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2298 	gpio = of_get_named_gpio(np, "wp-gpios", 0);
2300 	/* Having a missing entry is valid; return silently */
2301 	if (!gpio_is_valid(gpio))
2304 	if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2305 		dev_warn(dev, "gpio [%d] request failed\n", gpio);
2312 /* find the cd gpio for a given slot */
/*
 * Look up the optional per-slot "cd-gpios" entry and register it with the
 * MMC core's slot-gpio card-detect helper.  Missing entries are valid.
 */
2313 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2314 				  struct mmc_host *mmc)
2316 	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2322 	gpio = of_get_named_gpio(np, "cd-gpios", 0);
2324 	/* Having a missing entry is valid; return silently */
2325 	if (!gpio_is_valid(gpio))
2328 	if (mmc_gpio_request_cd(mmc, gpio, 0))
2329 		dev_warn(dev, "gpio [%d] request failed\n", gpio);
2331 #else /* CONFIG_OF */
/* Non-devicetree builds: stub out the OF helpers with inert defaults. */
2332 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2336 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2340 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2344 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2348 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2349 				  struct mmc_host *mmc)
2353 #endif /* CONFIG_OF */
/*
 * Allocate and register the mmc_host for one slot: set the ops table,
 * clock range (DT "clock-freq-min-max" overriding bus_hz-derived defaults),
 * Rockchip "supports-sd/sdio/emmc" restriction caps, OCR mask, platform and
 * driver-data caps, bus width, extra DT capability properties, block-layer
 * limits (IDMAC vs PIO), power-enable/WP/CD GPIOs and the vmmc regulator,
 * then mmc_add_host() and optional debugfs.
 * NOTE(review): listing is sampled — error-unwind paths, case labels and
 * several statements are missing from this body.
 */
2355 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2357 	struct mmc_host *mmc;
2358 	struct dw_mci_slot *slot;
2359 	const struct dw_mci_drv_data *drv_data = host->drv_data;
2364 	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2368 	slot = mmc_priv(mmc);
2372 	host->slot[id] = slot;
2375 	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2377 	mmc->ops = &dw_mci_ops;
	/* Default clock range derived from bus_hz; 510 is the max divider. */
2379 	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
2380 	mmc->f_max = host->bus_hz;
2381 	printk("%d..%s: fmin=%d, fmax=%d, bus_hz=%d \n", __LINE__,__FUNCTION__,mmc->f_min, mmc->f_max, host->bus_hz);
2383 	if (of_property_read_u32_array(host->dev->of_node,
2384 				       "clock-freq-min-max", freq, 2)) {
2385 		mmc->f_min = DW_MCI_FREQ_MIN;
2386 		mmc->f_max = DW_MCI_FREQ_MAX;
2388         printk("%d..%s: fmin=%d, fmax=%d \n", __LINE__,__FUNCTION__,mmc->f_min, mmc->f_max);
2390 		mmc->f_min = freq[0];
2391 		mmc->f_max = freq[1];
2393         printk("%d..%s: fmin=%d, fmax=%d \n", __LINE__,__FUNCTION__,mmc->f_min, mmc->f_max);
	/* Rockchip extension: restrict which card types this slot may host. */
2397 	if (of_find_property(host->dev->of_node, "supports-sd", NULL))
2398 		mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
2399 	if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
2400 		mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
2401 	if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
2402 		mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
2404 	if (host->pdata->get_ocr)
2405 		mmc->ocr_avail = host->pdata->get_ocr(id);
2407 		//mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2408 		mmc->ocr_avail = MMC_VDD_27_28|MMC_VDD_28_29|MMC_VDD_29_30|MMC_VDD_30_31
2409                          | MMC_VDD_31_32|MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35| MMC_VDD_35_36;
2411 	    mmc->ocr_avail |= MMC_VDD_26_27 |MMC_VDD_25_26 |MMC_VDD_24_25 |MMC_VDD_23_24
2412                           |MMC_VDD_22_23 |MMC_VDD_21_22 |MMC_VDD_20_21 |MMC_VDD_165_195;
2416 	 * Start with slot power disabled, it will be enabled when a card
2419 	if (host->pdata->setpower)
2420 		host->pdata->setpower(id, 0);
2422 	if (host->pdata->caps)
2423 		mmc->caps = host->pdata->caps;
2425 	if (host->pdata->pm_caps)
2426 		mmc->pm_caps = host->pdata->pm_caps;
	/* ctrl_id selects the per-controller entry in drv_data tables. */
2428 	if (host->dev->of_node) {
2429 		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2433 		ctrl_id = to_platform_device(host->dev)->id;
2435 	if (drv_data && drv_data->caps)
2436 		mmc->caps |= drv_data->caps[ctrl_id];
2437 	if (drv_data && drv_data->hold_reg_flag)
2438 		mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
2440 	if (host->pdata->caps2)
2441 		mmc->caps2 = host->pdata->caps2;
2443 	if (host->pdata->get_bus_wd)
2444 		bus_width = host->pdata->get_bus_wd(slot->id);
2445 	else if (host->dev->of_node)
2446 		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2450 	switch (bus_width) {
2452 		mmc->caps |= MMC_CAP_8_BIT_DATA;
2454 		mmc->caps |= MMC_CAP_4_BIT_DATA;
2456 	if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
2457 		mmc->caps |= MMC_CAP_POWER_OFF_CARD;
2458 	if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
2459 		mmc->caps |= MMC_CAP_SDIO_IRQ;
2460 	if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
2461 		mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
2462 	if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
2463 		mmc->pm_caps |= MMC_PM_KEEP_POWER;
2464 	if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
2465 		mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2467 	if (host->pdata->blk_settings) {
2468 		mmc->max_segs = host->pdata->blk_settings->max_segs;
2469 		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2470 		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2471 		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2472 		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2474 		/* Useful defaults if platform data is unset. */
2475 #ifdef CONFIG_MMC_DW_IDMAC
2476 		mmc->max_segs = host->ring_size;
2477 		mmc->max_blk_size = 65536;
2478 		mmc->max_blk_count = host->ring_size;
2479 		mmc->max_seg_size = 0x1000;
2480 		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2483 		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2484 		mmc->max_blk_count = 512;
2485 		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2486 		mmc->max_seg_size = mmc->max_req_size;
2487 #endif /* CONFIG_MMC_DW_IDMAC */
2490         slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
2492         if (gpio_is_valid(slot->pwr_en_gpio))
2497 		host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
2498 		if (IS_ERR(host->vmmc)) {
2499 			pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
2502 			ret = regulator_enable(host->vmmc);
2505 					"failed to enable regulator: %d\n", ret);
2510 	slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2511 	dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
2513 	ret = mmc_add_host(mmc);
2517 #if defined(CONFIG_DEBUG_FS)
2518 	dw_mci_init_debugfs(slot);
2521 	/* Card initially undetected */
2522 	slot->last_detect_state = 0;
/*
 * Tear down one slot: run the platform exit hook, unregister the mmc_host
 * (debugfs entries are removed by the MMC core), clear the host's slot
 * pointer, and free the mmc_host allocation.
 */
2531 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2533 	/* Shutdown detect IRQ */
2534 	if (slot->host->pdata->exit)
2535 		slot->host->pdata->exit(id);
2537 	/* Debugfs stuff is cleaned up by mmc core */
2538 	mmc_remove_host(slot->mmc);
2539 	slot->host->slot[id] = NULL;
2540 	mmc_free_host(slot->mmc);
/*
 * Set up DMA support: allocate one page of coherent memory for descriptor /
 * sg translation, select the DMA backend (internal IDMAC when configured),
 * and initialize it.  Any failure falls back to PIO mode.
 * NOTE(review): listing is sampled — the no_dma/fallback labels and returns
 * are missing from this body.
 */
2543 static void dw_mci_init_dma(struct dw_mci *host)
2545 	/* Alloc memory for sg translation */
2546 	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2547 					  &host->sg_dma, GFP_KERNEL);
2548 	if (!host->sg_cpu) {
2549 		dev_err(host->dev, "%s: could not alloc DMA memory\n",
2554 	/* Determine which DMA interface to use */
2555 #ifdef CONFIG_MMC_DW_IDMAC
2556 	host->dma_ops = &dw_mci_idmac_ops;
2557 	dev_info(host->dev, "Using internal DMA controller.\n");
	/* The backend must provide the full op set before we trust it. */
2563 	if (host->dma_ops->init && host->dma_ops->start &&
2564 	    host->dma_ops->stop && host->dma_ops->cleanup) {
2565 		if (host->dma_ops->init(host)) {
2566 			dev_err(host->dev, "%s: Unable to initialize "
2567 				"DMA Controller.\n", __func__);
2571 		dev_err(host->dev, "DMA initialization not found.\n");
2579 	dev_info(host->dev, "Using PIO mode.\n");
/*
 * Assert the controller/FIFO/DMA reset bits in CTRL and busy-poll up to
 * 500 ms for the hardware to self-clear them.  Logs and (per the sampled
 * tail) reports failure on timeout; returns a bool success flag.
 */
2584 static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
2586 	unsigned long timeout = jiffies + msecs_to_jiffies(500);
2589 	mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2590 				SDMMC_CTRL_DMA_RESET));
2592 	/* wait till resets clear */
2594 		ctrl = mci_readl(host, CTRL);
2595 		if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2596 			      SDMMC_CTRL_DMA_RESET)))
2598 	} while (time_before(jiffies, timeout));
2600 	dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
/* Controller-level DT property -> quirk-flag mapping table. */
2606 static struct dw_mci_of_quirks {
2611 		.quirk	= "broken-cd",
2612 		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board from the controller's device-tree node: slot count
 * (default 1), quirk flags from of_quirks[], optional fifo-depth,
 * card-detect debounce delay, bus clock frequency, driver-specific parse_dt
 * hook, and the various capability / pm-capability properties.
 * Returns the allocated pdata or an ERR_PTR on failure.
 * NOTE(review): listing is sampled — lines are missing from this body.
 */
2616 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2618 	struct dw_mci_board *pdata;
2619 	struct device *dev = host->dev;
2620 	struct device_node *np = dev->of_node;
2621 	const struct dw_mci_drv_data *drv_data = host->drv_data;
2623 	u32 clock_frequency;
2625 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2627 		dev_err(dev, "could not allocate memory for pdata\n");
2628 		return ERR_PTR(-ENOMEM);
2631 	/* find out number of slots supported */
2632 	if (of_property_read_u32(dev->of_node, "num-slots",
2633 				&pdata->num_slots)) {
2634 		dev_info(dev, "num-slots property not found, "
2635 				"assuming 1 slot is available\n");
2636 		pdata->num_slots = 1;
2640 	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2641 		if (of_get_property(np, of_quirks[idx].quirk, NULL))
2642 			pdata->quirks |= of_quirks[idx].id;
2644 	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2645 		dev_info(dev, "fifo-depth property not found, using "
2646 				"value of FIFOTH register as default\n");
2648 	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2650 	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2651 		pdata->bus_hz = clock_frequency;
	/* Give the SoC-specific driver a chance to parse its own properties. */
2653 	if (drv_data && drv_data->parse_dt) {
2654 		ret = drv_data->parse_dt(host);
2656 			return ERR_PTR(ret);
2659 	if (of_find_property(np, "keep-power-in-suspend", NULL))
2660 		pdata->pm_caps |= MMC_PM_KEEP_POWER;
2664 	if (of_find_property(np, "enable-sdio-wakeup", NULL))
2665 		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2667 	if (of_find_property(np, "supports-highspeed", NULL))
2668 		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2670 	if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
2671 		pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2673 	if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
2674 		pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2676 	if (of_get_property(np, "cd-inverted", NULL))
2677 		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
2678 	if (of_get_property(np, "bootpart-no-access", NULL))
2679 		pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
2684 #else /* CONFIG_OF */
2685 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2687 return ERR_PTR(-EINVAL);
2689 #endif /* CONFIG_OF */
/*
 * dw_mci_probe - bring up one DesignWare MMC controller instance.
 *
 * Sequence visible in this excerpt: parse DT platform data; acquire and
 * enable the "biu" and "ciu" clocks; run variant init/setup_clock hooks;
 * size the host data width from HCON; reset the block; set up DMA, FIFO
 * thresholds, interrupts, the tasklet/workqueue and the IRQ handler; then
 * initialize each slot. Error paths unwind the workqueue, DMA, regulator
 * and clocks.
 *
 * Returns 0 on success, negative errno otherwise (exact returns are
 * partially outside this excerpt).
 */
2691 int dw_mci_probe(struct dw_mci *host)
2693 const struct dw_mci_drv_data *drv_data = host->drv_data;
2694 int width, i, ret = 0;
2699 host->pdata = dw_mci_parse_dt(host);
2700 if (IS_ERR(host->pdata)) {
2701 dev_err(host->dev, "platform data not available\n");
/* Multi-slot operation requires a board-supplied slot mux callback */
2706 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
2708 "Platform data must supply select_slot function\n");
/* Bus interface unit clock: optional, only enabled when present */
2712 host->biu_clk = devm_clk_get(host->dev, "biu");
2713 if (IS_ERR(host->biu_clk)) {
2714 dev_dbg(host->dev, "biu clock not available\n");
2716 ret = clk_prepare_enable(host->biu_clk);
2718 dev_err(host->dev, "failed to enable biu clock\n");
/* Card interface unit clock: falls back to pdata->bus_hz if missing */
2723 host->ciu_clk = devm_clk_get(host->dev, "ciu");
2724 if (IS_ERR(host->ciu_clk)) {
2725 dev_dbg(host->dev, "ciu clock not available\n");
2726 host->bus_hz = host->pdata->bus_hz;
2728 ret = clk_prepare_enable(host->ciu_clk);
2730 dev_err(host->dev, "failed to enable ciu clock\n");
/* NOTE(review): debug leftover — unconditionally forces bus_hz to 50MHz,
 * overriding the pdata/clk values obtained above; should be removed or
 * made conditional before production use. */
2736 //test, modify by xbw
2737 host->bus_hz = 50000000;
2739 if (drv_data && drv_data->init) {
2740 ret = drv_data->init(host);
2743 "implementation specific init failed\n");
2746 host->bus_hz = clk_get_rate(host->ciu_clk);
2748 if (drv_data && drv_data->setup_clock) {
2749 ret = drv_data->setup_clock(host);
2752 "implementation specific clock setup failed\n");
/* Without a known bus clock rate we cannot program dividers — bail */
2757 if (!host->bus_hz) {
2759 "Platform data must supply bus speed\n");
2764 host->quirks = host->pdata->quirks;
2766 spin_lock_init(&host->lock);
2767 INIT_LIST_HEAD(&host->queue);
2770 * Get the host data width - this assumes that HCON has been set with
2771 * the correct values.
/* HCON[9:7] encodes the data bus width: 0 = 16-bit, 2 = 64-bit, 1 = 32-bit */
2773 i = (mci_readl(host, HCON) >> 7) & 0x7;
2775 host->push_data = dw_mci_push_data16;
2776 host->pull_data = dw_mci_pull_data16;
2778 host->data_shift = 1;
2779 } else if (i == 2) {
2780 host->push_data = dw_mci_push_data64;
2781 host->pull_data = dw_mci_pull_data64;
2783 host->data_shift = 3;
2785 /* Check for a reserved value, and warn if it is */
2787 "HCON reports a reserved host data width!\n"
2788 "Defaulting to 32-bit access.\n");
2789 host->push_data = dw_mci_push_data32;
2790 host->pull_data = dw_mci_pull_data32;
2792 host->data_shift = 2;
2795 /* Reset all blocks */
2796 if (!mci_wait_reset(host->dev, host))
2799 host->dma_ops = host->pdata->dma_ops;
2800 dw_mci_init_dma(host);
2802 /* Clear the interrupts for the host controller */
2803 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2804 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2806 /* Put in max timeout */
2807 mci_writel(host, TMOUT, 0xFFFFFFFF);
2810 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2811 * Tx Mark = fifo_size / 2 DMA Size = 8
2813 if (!host->pdata->fifo_depth) {
2815 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2816 * have been overwritten by the bootloader, just like we're
2817 * about to do, so if you know the value for your hardware, you
2818 * should put it in the platform data.
/* FIFOTH[27:16] holds RX_WMark; depth = field value + 1 */
2820 fifo_size = mci_readl(host, FIFOTH);
2821 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2823 fifo_size = host->pdata->fifo_depth;
2825 host->fifo_depth = fifo_size;
2827 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2828 mci_writel(host, FIFOTH, host->fifoth_val);
2830 /* disable clock to CIU */
2831 mci_writel(host, CLKENA, 0);
2832 mci_writel(host, CLKSRC, 0);
2835 * In 2.40a spec, Data offset is changed.
2836 * Need to check the version-id and set data-offset for DATA register.
2838 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2839 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2841 if (host->verid < DW_MMC_240A)
2842 host->data_offset = DATA_OFFSET;
2844 host->data_offset = DATA_240A_OFFSET;
/* Bottom-half processing: tasklet for requests, workqueue for card events */
2846 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2847 host->card_workqueue = alloc_workqueue("dw-mci-card",
2848 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
2849 if (!host->card_workqueue) {
2853 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2854 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2855 host->irq_flags, "dw-mci", host);
/* Slot count: pdata wins, else read it from HCON[5:1] (+1) */
2859 if (host->pdata->num_slots)
2860 host->num_slots = host->pdata->num_slots;
2862 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2865 * Enable interrupts for command done, data over, data empty, card det,
2866 * receive ready and error such as transmit, receive timeout, crc error
2868 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2869 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2870 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2871 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2872 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2874 dev_info(host->dev, "DW MMC controller at irq %d, "
2875 "%d bit host data width, "
2877 host->irq, width, fifo_size);
2879 /* We need at least one slot to succeed */
2880 for (i = 0; i < host->num_slots; i++) {
2881 ret = dw_mci_init_slot(host, i);
2883 dev_dbg(host->dev, "slot %d init failed\n", i);
2889 dev_info(host->dev, "%d slots initialized\n", init_slots);
2891 dev_dbg(host->dev, "attempted to initialize %d slots, "
2892 "but failed on all\n", host->num_slots);
2897 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2898 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* Error unwind path: release resources in reverse acquisition order */
2903 destroy_workqueue(host->card_workqueue);
2906 if (host->use_dma && host->dma_ops->exit)
2907 host->dma_ops->exit(host);
2910 regulator_disable(host->vmmc);
2913 if (!IS_ERR(host->ciu_clk))
2914 clk_disable_unprepare(host->ciu_clk);
2917 if (!IS_ERR(host->biu_clk))
2918 clk_disable_unprepare(host->biu_clk);
2922 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - tear down a controller instance.
 *
 * Masks and clears all interrupts, cleans up every slot, gates the CIU
 * clock, destroys the card-event workqueue, shuts down DMA, and disables
 * the regulator and clocks — the reverse of dw_mci_probe().
 */
2924 void dw_mci_remove(struct dw_mci *host)
/* Silence the controller before dismantling anything */
2928 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2929 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2931 for (i = 0; i < host->num_slots; i++) {
2932 dev_dbg(host->dev, "remove slot %d\n", i);
2934 dw_mci_cleanup_slot(host->slot[i], i);
2937 /* disable clock to CIU */
2938 mci_writel(host, CLKENA, 0);
2939 mci_writel(host, CLKSRC, 0);
2941 destroy_workqueue(host->card_workqueue);
2943 if (host->use_dma && host->dma_ops->exit)
2944 host->dma_ops->exit(host);
2947 regulator_disable(host->vmmc);
/* devm_clk_get may have returned ERR_PTR for optional clocks — check */
2949 if (!IS_ERR(host->ciu_clk))
2950 clk_disable_unprepare(host->ciu_clk);
2952 if (!IS_ERR(host->biu_clk))
2953 clk_disable_unprepare(host->biu_clk);
2955 EXPORT_SYMBOL(dw_mci_remove);
2959 #ifdef CONFIG_PM_SLEEP
2961 * TODO: we should probably disable the clock to the card in the suspend path.
/*
 * dw_mci_suspend - suspend every slot's mmc host.
 *
 * On a mid-loop failure the already-suspended slots are resumed so the
 * system is left in a consistent state; the regulator is disabled last.
 */
2963 int dw_mci_suspend(struct dw_mci *host)
2967 for (i = 0; i < host->num_slots; i++) {
2968 struct dw_mci_slot *slot = host->slot[i];
2971 ret = mmc_suspend_host(slot->mmc);
/* Failure: roll back — resume the slots suspended so far */
2974 slot = host->slot[i];
2976 mmc_resume_host(host->slot[i]->mmc);
2983 regulator_disable(host->vmmc);
2987 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - re-enable power, reset the controller and reprogram
 * the registers lost across suspend (FIFOTH, TMOUT, interrupt masks),
 * then restore bus settings for slots that kept power.
 */
2989 int dw_mci_resume(struct dw_mci *host)
2994 ret = regulator_enable(host->vmmc);
2997 "failed to enable regulator: %d\n", ret);
3002 if (!mci_wait_reset(host->dev, host)) {
3007 if (host->use_dma && host->dma_ops->init)
3008 host->dma_ops->init(host);
3011 * Restore the initial value at FIFOTH register
3012 * And Invalidate the prev_blksz with zero
3014 mci_writel(host, FIFOTH, host->fifoth_val);
3015 host->prev_blksz = 0;
3016 /* Put in max timeout */
3017 mci_writel(host, TMOUT, 0xFFFFFFFF);
/* Same interrupt set as probe: command/data completion, FIFO, CD, errors */
3019 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3020 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3021 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3022 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
3023 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3025 for (i = 0; i < host->num_slots; i++) {
3026 struct dw_mci_slot *slot = host->slot[i];
/* Slots that kept power (e.g. SDIO wakeup) need ios/bus re-applied */
3029 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3030 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3031 dw_mci_setup_bus(slot, true);
/* NOTE(review): mmc_resume_host call is commented out here — confirm the
 * MMC core resumes the hosts elsewhere in this kernel tree. */
3034 // ret = mmc_resume_host(host->slot[i]->mmc);
3040 EXPORT_SYMBOL(dw_mci_resume);
3041 #endif /* CONFIG_PM_SLEEP */
/* Module entry point: just announces the driver; platform drivers register
 * themselves separately (registration code not visible in this excerpt). */
3043 static int __init dw_mci_init(void)
3045 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
/* Module exit point: nothing visible to undo here */
3049 static void __exit dw_mci_exit(void)
3053 module_init(dw_mci_init);
3054 module_exit(dw_mci_exit);
3056 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3058 MODULE_AUTHOR("NXP Semiconductor VietNam");
3059 MODULE_AUTHOR("Imagination Technologies Ltd");
/* Fixed mojibake (mis-decoded fullwidth comma) and malformed email
 * address spacing in the author string. */
3060 MODULE_AUTHOR("Rockchip Electronics, Bangwang Xie <xbw@rock-chips.com>");
3062 MODULE_LICENSE("GPL v2");