2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/err.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/ioport.h>
25 #include <linux/module.h>
26 #include <linux/platform_device.h>
27 #include <linux/seq_file.h>
28 #include <linux/slab.h>
29 #include <linux/stat.h>
30 #include <linux/delay.h>
31 #include <linux/irq.h>
32 #include <linux/mmc/host.h>
33 #include <linux/mmc/mmc.h>
34 #include <linux/mmc/sdio.h>
35 #include <linux/mmc/rk_mmc.h>
36 #include <linux/bitops.h>
37 #include <linux/regulator/consumer.h>
38 #include <linux/workqueue.h>
40 #include <linux/of_gpio.h>
41 #include <linux/mmc/slot-gpio.h>
44 #include "rk_sdmmc_of.h"
/* Common flag combinations */
/*
 * Interrupt-status bits treated as data/command errors by the IRQ handler.
 * NOTE(review): some continuation lines of these macros are not visible in
 * this view (the OR-lists appear truncated) -- verify against the full file.
 */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
/* Direction of the current data transfer (host->dir_status) */
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
/* Transfers shorter than this many bytes are done in PIO, not DMA */
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	50000000	/* unit: Hz (SoC limit; controller supports up to 200000000) */
#define DW_MCI_FREQ_MIN	300000		/* unit: Hz (identification clock; spec default is 400000) */

/*
 * Busy-poll timeouts in milliseconds for dw_mci_wait_unbusy().
 *
 * Fix: the SD timeout previously carried a stray trailing ';' in its
 * definition, which was pasted into every expansion site (harmless in
 * "timeout = SDMMC_DATA_TIMEOUT_SD;" but a latent bug anywhere else,
 * e.g. inside an expression or an un-braced if/else).
 */
#define SDMMC_DATA_TIMEOUT_SD	500	/* max is 250ms per spec; padded to tolerate marginal cards */
#define SDMMC_DATA_TIMEOUT_SDIO	250
#define SDMMC_DATA_TIMEOUT_EMMC	2500
#ifdef CONFIG_MMC_DW_IDMAC
/* All IDMAC interrupt-status bits acknowledged as one group */
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
	/*
	 * Hardware DMA descriptor (32-bit IDMAC, chained mode).
	 * NOTE(review): the enclosing "struct idmac_desc {" line and the
	 * closing brace are not visible in this view.
	 */
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)
	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
	u32		des2;	/* buffer 1 physical address */
	u32		des3;	/* buffer 2 physical address */
#endif /* CONFIG_MMC_DW_IDMAC */
/* HS200/SDR104 tuning block pattern for 4-bit bus width (JEDEC/SD defined) */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* HS200 tuning block pattern for 8-bit bus width (JEDEC defined) */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Dump every controller register of @host to the kernel log (debug aid). */
static int dw_mci_regs_printk(struct dw_mci *host)
	struct sdmmc_reg *regs = dw_mci_regs;

	/* Walk the name-terminated register table, printing each live value */
	while( regs->name != 0 ){
		printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
	/* NOTE(review): the loop advance (regs++) and the function epilogue
	 * are not visible in this view. */
	printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
135 #if defined(CONFIG_DEBUG_FS)
136 static int dw_mci_req_show(struct seq_file *s, void *v)
138 struct dw_mci_slot *slot = s->private;
139 struct mmc_request *mrq;
140 struct mmc_command *cmd;
141 struct mmc_command *stop;
142 struct mmc_data *data;
144 /* Make sure we get a consistent snapshot */
145 spin_lock_bh(&slot->host->lock);
155 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
156 cmd->opcode, cmd->arg, cmd->flags,
157 cmd->resp[0], cmd->resp[1], cmd->resp[2],
158 cmd->resp[2], cmd->error);
160 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
161 data->bytes_xfered, data->blocks,
162 data->blksz, data->flags, data->error);
165 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
166 stop->opcode, stop->arg, stop->flags,
167 stop->resp[0], stop->resp[1], stop->resp[2],
168 stop->resp[2], stop->error);
171 spin_unlock_bh(&slot->host->lock);
/* debugfs open hook: bind dw_mci_req_show to the per-slot private data */
static int dw_mci_req_open(struct inode *inode, struct file *file)
	return single_open(file, dw_mci_req_show, inode->i_private);

/* File operations for the debugfs "req" node (seq_file single-shot show) */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	/* NOTE(review): .read/.llseek initializers are not visible here */
	.release	= single_release,
/*
 * debugfs "regs" file.
 * NOTE(review): SDMMC_STATUS and friends are register *offset* macros and
 * no mci_readl() appears here, so as written this prints the offsets, not
 * live register contents -- confirm whether values were intended.
 */
static int dw_mci_regs_show(struct seq_file *s, void *v)
	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open hook: bind dw_mci_regs_show to the per-host private data */
static int dw_mci_regs_open(struct inode *inode, struct file *file)
	return single_open(file, dw_mci_regs_show, inode->i_private);

/* File operations for the debugfs "regs" node */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	/* NOTE(review): .read/.llseek initializers are not visible here */
	.release	= single_release,
/*
 * Create the per-slot debugfs nodes under the mmc core's debugfs root:
 * "regs", "req", "state", "pending_events" and "completed_events".
 * Failure is reported but non-fatal (debugfs is diagnostics only).
 */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;

	root = mmc->debugfs_root;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
	node = debugfs_create_file("req", S_IRUSR, root, slot,
	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	/* NOTE(review): the per-node NULL checks between these calls are not
	 * visible in this view. */
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
#endif /* defined(CONFIG_DEBUG_FS) */
/* Program the hardware data/response timeout counter to its maximum. */
static void dw_mci_set_timeout(struct dw_mci *host)
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller CMD-register flag word.
 * Returns the CMD bits (opcode plus response/data/stop flags); the caller
 * ORs in SDMMC_CMD_START when issuing.
 * NOTE(review): the "u32 cmdr = cmd->opcode;" initializer and the
 * data = cmd->data assignment are not visible in this view.
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;

	/* Marked in-progress until the command-done interrupt fires */
	cmd->error = -EINPROGRESS;

	if (cmdr == MMC_STOP_TRANSMISSION)
		cmdr |= SDMMC_CMD_STOP;
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

		/* Data phase flags: direction and stream vs. block mode */
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;

	/* Allow the SoC-specific variant driver to adjust the flags */
	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build the stop/abort command matching @cmd into host->stop_abort and
 * return its CMD-register flag word: CMD12 (STOP_TRANSMISSION) for block
 * read/write opcodes, or CMD52 with the CCCR abort bit for SDIO CMD53.
 */
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
	struct mmc_command *stop;

	stop = &host->stop_abort;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		/* Abort CMD53 via a write to the CCCR abort register,
		 * preserving the original command's function number */
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;

	cmdr = stop->opcode | SDMMC_CMD_STOP |
	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Write the argument and CMD flags to the controller and set the START
 * bit, kicking off the command on the bus.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);
	mci_writel(host, CMDARG, cmd->arg);

	MMC_DBG_INFO_FUNC(host->mmc,"%d..%s start cmd=%d, arg=0x%x[%s]",__LINE__, __FUNCTION__,cmd->opcode, cmd->arg,mmc_hostname(host->mmc));
	//dw_mci_regs_printk(host);

	if(host->mmc->hold_reg_flag)
		cmd_flags |= SDMMC_CMD_USE_HOLD_REG;//fix the value to 1 in some Soc,for example RK3188.

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the mrq-supplied stop command using the precomputed flag word. */
static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
	dw_mci_start_command(host, data->stop, host->stop_cmdr);
/* DMA interface functions */
/*
 * Abort an in-flight DMA transfer (if any) and mark the transfer phase
 * complete so the state machine can advance.
 */
static void dw_mci_stop_dma(struct dw_mci *host)
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Map the mmc_data direction flag onto the DMA-API direction constant. */
static int dw_mci_get_dma_dir(struct mmc_data *data)
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	return DMA_FROM_DEVICE;
#ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the scatterlist of the current data transfer, unless it was
 * pre-mapped via pre_req() (host_cookie set), in which case post_req()
 * owns the unmap.
 */
static void dw_mci_dma_cleanup(struct dw_mci *host)
	struct mmc_data *data = host->data;

	if (!data->host_cookie)
		dma_unmap_sg(host->dev,
			     dw_mci_get_dma_dir(data));
/* Soft-reset the internal DMA controller via the BMOD register. */
static void dw_mci_idmac_reset(struct dw_mci *host)
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
/* Disconnect the IDMAC from the data path and halt/reset it. */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
/*
 * IDMAC transfer-done path: unmap buffers, flag XFER_COMPLETE and kick
 * the state-machine tasklet.
 */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
		host->mrq->cmd->opcode,host->mrq->cmd->arg,data->blocks,data->blksz,mmc_hostname(host->mmc));

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
/*
 * Fill the pre-allocated IDMAC descriptor ring from the (already DMA
 * mapped) scatterlist: one descriptor per sg entry, OWN+chained, with the
 * first/last descriptors flagged FD/LD and interrupt-on-completion enabled
 * only on the last.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;

	/* Set first descriptor */
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor -- sg_cpu is byte-addressed (void *), hence
	 * the explicit sizeof() scaling; assumes sg_len >= 1 */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;
/*
 * Program the descriptor ring for this transfer, route the data path
 * through the IDMAC, enable it, and poke the poll-demand register to
 * start descriptor fetch.
 */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: build a circular, forward-linked descriptor ring
 * in the page at host->sg_cpu, reset the IDMAC, unmask only the normal/
 * RX/TX interrupts, and point DBADDR at the ring.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
	struct idmac_desc *p;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
/* DMA-ops vtable wiring the IDMAC implementation into the core driver */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
#endif /* CONFIG_MMC_DW_IDMAC */
/*
 * Decide whether this transfer may use DMA and, if so, map its
 * scatterlist. Returns the mapped sg count; @next != 0 marks an
 * asynchronous pre-mapping (result cached in data->host_cookie).
 * Transfers that are short, or not word-aligned in offset/length,
 * fall back to PIO.
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
	struct scatterlist *sg;
	unsigned int i, sg_len;

	/* Synchronous path: reuse a mapping made earlier by pre_req() */
	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)

	sg_len = dma_map_sg(host->dev,
			    dw_mci_get_dma_dir(data));

	data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: pre-map the next request's data while the
 * current one is still on the bus. A leftover host_cookie means the
 * core misused the API; clear it and bail.
 */
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)

	if (data->host_cookie) {
		data->host_cookie = 0;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: undo a pre_req() mapping once the request has
 * completed (host_cookie non-zero marks a pre-mapped transfer).
 */
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
/*
 * Pick DMA burst size (MSIZE) and RX/TX FIFO watermarks for this block
 * size: choose the largest burst that divides both the per-block FIFO
 * depth and the TX watermark complement, then program FIFOTH.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * Keep the default (msize 0 / rx_wmark 1)
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {

	/* Largest burst evenly dividing both constraints wins */
	if (!((blksz_depth % mszs[idx]) ||
	      (tx_wmark_invers % mszs[idx]))) {
		rx_wmark = mszs[idx] - 1;

	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
/*
 * Configure the card-read-threshold for high speed modes (HS200/SDR104
 * only): program one block-size worth of threshold, or disable the
 * feature when the block would not fit the FIFO.
 */
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));

	/* Fall-through/disable path: threshold off */
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to start @data via DMA. Returns non-zero when DMA cannot be used
 * (no channel, unmappable sg list) so the caller falls back to PIO.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
	/* If we don't have a channel, we can't do DMA */

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
		host->dma_ops->stop(host);

		"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		(unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);
/*
 * Prepare and start the data phase of a request: set direction status,
 * attempt DMA, and fall back to interrupt-driven PIO (sg_miter + RX/TX
 * watermark interrupts) when DMA is unavailable.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
	data->error = -EINPROGRESS;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
		host->dir_status = DW_MCI_SEND_STATUS;

	MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
		data->blocks, data->blksz, mmc_hostname(host->mmc));

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback */
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);

		host->part_buf_start = 0;
		host->part_buf_count = 0;

		/* Ack stale watermark IRQs, then unmask them for PIO */
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transfered by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;

		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
/*
 * Issue a register-only controller command (e.g. clock update) and
 * busy-poll up to 500 ms for the START bit to self-clear; log an error
 * on timeout.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))

	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
/*
 * Program the card clock (divider, enable, optional low-power gating)
 * and bus width for @slot. Clock changes follow the required sequence:
 * disable clock -> update divider -> re-enable, each step latched via an
 * UPD_CLK command. @force_clkinit forces reprogramming even when the
 * requested rate is unchanged.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;

	/* clock == 0: gate the card clock entirely */
	mci_writel(host, CLKENA, 0);
		     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		/* Only log when the effective rate actually changes */
		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;

		/* SDIO interrupt bit moved in controller version 2.40a */
		if (host->verid < DW_MMC_240A)
			sdio_int = SDMMC_INT_SDIO(slot->id);
			sdio_int = SDMMC_INT_SDIO((slot->id) + 8);

		if (!(mci_readl(host, INTMASK) & sdio_int))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* keep the clock with reflecting clock dividor */
		slot->__clk_old = clock << div;

	host->current_speed = clock;

	if(slot->ctype != slot->pre_ctype)
		MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]", \
			div ? ((host->bus_hz / div) >> 1):host->bus_hz, \
			(slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits", mmc_hostname(host->mmc));
	slot->pre_ctype = slot->ctype;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
/*
 * Busy-poll until the controller/data lines report idle, bounded by a
 * card-type specific timeout (SDIO 250 ms default, SD 500 ms, eMMC
 * 2500 ms). Returns with the bus possibly still busy on timeout.
 */
static void dw_mci_wait_unbusy(struct dw_mci *host)
	unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
	unsigned long time_loop;

	MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));

	if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
		timeout = SDMMC_DATA_TIMEOUT_EMMC;
	else if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
		timeout = SDMMC_DATA_TIMEOUT_SD;

	time_loop = jiffies + msecs_to_jiffies(timeout);
		status = mci_readl(host, STATUS);
		if (!(status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
		//MMC_DBG_INFO_FUNC("dw_mci_wait_unbusy, waiting for......");
	} while (time_before(jiffies, time_loop));
/*
 * Issue @cmd of @slot's current request on the hardware: select the
 * slot, wait for a quiet bus, reset per-request event state, program
 * byte/block counts for any data phase, then start the command
 * (prefixed with the 80-clock INIT sequence on first use).
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
	struct mmc_request *mrq;
	struct mmc_data	*data;

	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	host->cur_slot = slot;

#if 0 //add by xbw,at 2014-03-12
	/*clean FIFO if it is a new request*/
	if(!(mrq->cmd->opcode & SDMMC_CMD_STOP)) {
		MMC_DBG_INFO_FUNC("%d..%s: reset the ctrl.", __LINE__, __FUNCTION__);
		mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
					SDMMC_CTRL_DMA_RESET));

	dw_mci_wait_unbusy(host);

	/* Fresh per-request bookkeeping */
	host->pending_events = 0;
	host->completed_events = 0;
	host->data_status = 0;

	dw_mci_set_timeout(host);
	mci_writel(host, BYTCNT, data->blksz*data->blocks);
	mci_writel(host, BLKSIZ, data->blksz);

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	dw_mci_submit_data(host, data);

	dw_mci_start_command(host, cmd, cmdflags);

	/* Precompute the stop command flags for a later data abort */
	host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * Begin @slot's queued request: prefer the SET_BLOCK_COUNT (sbc)
 * pre-command when present, otherwise the main command.
 */
static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
		mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
/* must be called with host->lock held */
/*
 * Start @mrq immediately when the host is idle, otherwise park the slot
 * on host->queue; dw_mci_request_end() drains the queue later.
 */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
		list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops.request: fail fast with -ENOMEDIUM when no card is
 * present, otherwise hand the request to the queue/start logic. Presence
 * check and queueing happen under one lock hold (see comment below).
 */
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;

		MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_reqeust--reqeuest done, cmd=%d [%s]",mrq->cmd->opcode, mmc_hostname(host->mmc));

		mmc_request_done(mmc, mrq);

	MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
		mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.set_ios: apply bus width, DDR timing (UHS_REG), clock
 * rate, variant-specific hooks, and slot power (PWREN + board callback)
 * requested by the MMC core.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
		slot->pre_ctype = SDMMC_CTYPE_1BIT;

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode is a per-slot bit in the upper half of UHS_REG */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= ((0x1 << slot->id) << 16);
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
		/* Power-up: schedule the 80-clock INIT sequence */
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);

		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);

		/* Power down slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, 0);
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops.get_ro: report write-protect state. Precedence: slot
 * quirk (never protected) -> board callback -> WP GPIO -> controller
 * WRTPRT register.
 */
static int dw_mci_get_ro(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
	else if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else if (gpio_is_valid(slot->wp_gpio))
		read_only = gpio_get_value(slot->wp_gpio);

		mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");
/*
 * mmc_host_ops.get_cd: report card presence. SDIO slots are forced
 * present (non-removable radios). Otherwise precedence: broken-CD quirk
 * -> board callback -> CD GPIO -> controller CDETECT register. The
 * result is mirrored into the slot's PRESENT flag under host->lock.
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO){
		spin_lock_bh(&host->lock);
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		spin_unlock_bh(&host->lock);

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else if (!IS_ERR_VALUE(gpio_cd))
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))

	spin_lock_bh(&host->lock);
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.hw_reset: pulse the eMMC RST_n line with timings padded
 * beyond the spec minimums.
 */
static void dw_mci_hw_reset(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);

	/*
	 * According to eMMC spec
	 * tRstW >= 1us ; RST_n pulse width
	 * tRSCA >= 200us ; RST_n to Command time
	 * tRSTH >= 1us ; RST_n high period
	 */
	mci_writel(slot->host, RST_n, 0x1);

	udelay(10); /* 10us, padded for bad-quality eMMC parts */

	mci_writel(slot->host, RST_n, 0x0);

	usleep_range(300, 1000); /* at least 300us (> the 200us tRSCA minimum) */
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
	struct dw_mci *host = slot->host;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		/* Clear the bit and latch it with a clock-update command */
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * mmc_host_ops.enable_sdio_irq: mask/unmask this slot's SDIO interrupt
 * bit (position depends on controller version; see dw_mci_setup_bus),
 * disabling clock low-power gating first so the IRQ can be delivered.
 */
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);

	if (host->verid < DW_MMC_240A)
		sdio_int = SDMMC_INT_SDIO(slot->id);
		sdio_int = SDMMC_INT_SDIO((slot->id) + 8);

	/*
	 * Turn off low power mode if it was enabled. This is a bit of
	 * a heavy operation and we disable / enable IRQs a lot, so
	 * we'll leave low power mode disabled and it will get
	 * re-enabled again in dw_mci_setup_bus().
	 */
	dw_mci_disable_low_power(slot);

	mci_writel(host, INTMASK,
		   (int_mask | sdio_int));
	mci_writel(host, INTMASK,
		   (int_mask & ~sdio_int));
/*
 * mmc_host_ops.execute_tuning: pick the standard tuning block pattern
 * for the opcode/bus width (CMD21 HS200: 8- or 4-bit pattern; CMD19:
 * 4-bit pattern) and delegate the actual tuning loop to the variant
 * driver's execute_tuning hook.
 */
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct dw_mci_tuning_data tuning_data;

	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
			"Undefined command(%d) for tuning\n", opcode);

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/* mmc_host_ops vtable exported to the MMC core for every slot */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.hw_reset		= dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
/*
 * End-of-data handling: for writes (excluding the CMD19 bus test, whose
 * CRC status is expected to differ), translate latched data-status bits
 * into data->error, then wait for the card to leave its busy state.
 */
static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
	if(DW_MCI_SEND_STATUS == host->dir_status){
		if( MMC_BUS_TEST_W != host->cmd->opcode){
			if(host->data_status & SDMMC_INT_DCRC)
				host->data->error = -EILSEQ;
			else if(host->data_status & SDMMC_INT_EBE)
				host->data->error = -ETIMEDOUT;
			dw_mci_wait_unbusy(host);
		dw_mci_wait_unbusy(host);
/*
 * Finish @mrq: settle the data phase, start the next queued slot's
 * request (or go idle), then complete the request to the MMC core with
 * host->lock dropped across mmc_request_done() (see sparse annotations).
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	dw_mci_deal_data_end(host, mrq);

	MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
		mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
	MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
		mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));

	host->cur_slot->mrq = NULL;

	/* Drain the next waiting slot, if any */
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
		dw_mci_start_request(host, slot);
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;

	/* Drop the lock while calling back into the MMC core */
	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
/*
 * Read the command response registers and decode the latched cmd_status
 * interrupt bits into cmd->error (-ETIMEDOUT / -EILSEQ / response error).
 */
1327 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1329 u32 status = host->cmd_status;
1331 host->cmd_status = 0;
1333 /* Read the response from the card (up to 16 bytes) */
1334 if (cmd->flags & MMC_RSP_PRESENT) {
1335 if (cmd->flags & MMC_RSP_136) {
/* 136-bit responses arrive LSW-first: RESP0 maps to resp[3] */
1336 cmd->resp[3] = mci_readl(host, RESP0);
1337 cmd->resp[2] = mci_readl(host, RESP1);
1338 cmd->resp[1] = mci_readl(host, RESP2);
1339 cmd->resp[0] = mci_readl(host, RESP3);
1341 MMC_DBG_INFO_FUNC(host->mmc," command complete [%s], \ncmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x", \
1342 mmc_hostname(host->mmc), cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0]);
1344 cmd->resp[0] = mci_readl(host, RESP0);
1348 MMC_DBG_INFO_FUNC(host->mmc, " command complete [%s], cmd=%d,resp[0]=0x%x",\
1349 mmc_hostname(host->mmc),cmd->opcode, cmd->resp[0]);
/* Map hardware error bits to the MMC core's errno convention */
1353 if (status & SDMMC_INT_RTO)
1354 cmd->error = -ETIMEDOUT;
1355 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1356 cmd->error = -EILSEQ;
1357 else if (status & SDMMC_INT_RESP_ERR)
1361 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=0x%x [%s]",cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* CMD13 polling errors are too noisy to log at error level */
1364 if(MMC_SEND_STATUS != cmd->opcode)
1365 MMC_DBG_ERR_FUNC(host->mmc, " command complete, cmd=%d,cmdError=0x%x [%s]",\
1366 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
1368 /* newer ip versions need a delay between retries */
1369 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half request state machine.  Driven by events set from the IRQ
 * handler (EVENT_CMD_COMPLETE / EVENT_XFER_COMPLETE / EVENT_DATA_COMPLETE /
 * EVENT_DATA_ERROR); loops until the state stops changing, then publishes
 * the final state under host->lock.
 * NOTE(review): the listing drops many interior lines (breaks, else arms,
 * the do{ opener); state-transition details below are as visible only.
 */
1375 static void dw_mci_tasklet_func(unsigned long priv)
1377 struct dw_mci *host = (struct dw_mci *)priv;
1378 struct dw_mci_slot *slot = mmc_priv(host->mmc);
1379 struct mmc_data *data;
1380 struct mmc_command *cmd;
1381 enum dw_mci_state state;
1382 enum dw_mci_state prev_state;
1385 spin_lock(&host->lock);
1387 state = host->state;
/* Wait for the command phase to finish before acting */
1397 case STATE_SENDING_CMD:
1398 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1399 &host->pending_events))
1404 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1405 dw_mci_command_complete(host, cmd);
/* CMD23 (set-block-count) done: now issue the real data command */
1406 if (cmd == host->mrq->sbc && !cmd->error) {
1407 prev_state = state = STATE_SENDING_CMD;
1408 __dw_mci_start_request(host, host->cur_slot,
/* Command failed with data attached: abort DMA and send a stop */
1413 if (cmd->data && cmd->error) {
1414 dw_mci_stop_dma(host);
1417 send_stop_cmd(host, data);
1418 state = STATE_SENDING_STOP;
1424 send_stop_abort(host, data);
1425 state = STATE_SENDING_STOP;
1431 if (!host->mrq->data || cmd->error) {
1432 dw_mci_request_end(host, host->mrq);
1436 prev_state = state = STATE_SENDING_DATA;
1439 case STATE_SENDING_DATA:
1440 if (test_and_clear_bit(EVENT_DATA_ERROR,
1441 &host->pending_events)) {
1442 dw_mci_stop_dma(host);
1445 send_stop_cmd(host, data);
1447 send_stop_abort(host, data);
1449 state = STATE_DATA_ERROR;
1452 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
1453 prev_state,state, mmc_hostname(host->mmc));
1455 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1456 &host->pending_events))
1458 MMC_DBG_INFO_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
1459 prev_state,state,mmc_hostname(host->mmc));
1461 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1462 prev_state = state = STATE_DATA_BUSY;
1465 case STATE_DATA_BUSY:
1466 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1467 &host->pending_events))
1470 dw_mci_deal_data_end(host, host->mrq);
1471 MMC_DBG_INFO_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
1472 prev_state,state,mmc_hostname(host->mmc));
1475 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1476 status = host->data_status;
/* Decode data-phase error bits into data->error */
1478 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1479 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
1480 MMC_DBG_ERR_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
1481 prev_state,state, status, mmc_hostname(host->mmc));
1483 if (status & SDMMC_INT_DRTO) {
1484 data->error = -ETIMEDOUT;
1485 } else if (status & SDMMC_INT_DCRC) {
1486 data->error = -EILSEQ;
1487 } else if (status & SDMMC_INT_EBE &&
1489 DW_MCI_SEND_STATUS) {
1491 * No data CRC status was returned.
1492 * The number of bytes transferred will
1493 * be exaggerated in PIO mode.
1495 data->bytes_xfered = 0;
1496 data->error = -ETIMEDOUT;
1505 * After an error, there may be data lingering
1506 * in the FIFO, so reset it - doing so
1507 * generates a block interrupt, hence setting
1508 * the scatter-gather pointer to NULL.
1510 sg_miter_stop(&host->sg_miter);
1512 ctrl = mci_readl(host, CTRL);
1513 ctrl |= SDMMC_CTRL_FIFO_RESET;
1514 mci_writel(host, CTRL, ctrl);
/* Success path: report the full transfer length */
1516 data->bytes_xfered = data->blocks * data->blksz;
1521 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
1522 prev_state,state,mmc_hostname(host->mmc));
1523 dw_mci_request_end(host, host->mrq);
1526 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
1527 prev_state,state,mmc_hostname(host->mmc));
/* With CMD23 in use the controller auto-stops; no CMD12 needed */
1529 if (host->mrq->sbc && !data->error) {
1530 data->stop->error = 0;
1532 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
1533 prev_state,state,mmc_hostname(host->mmc));
1535 dw_mci_request_end(host, host->mrq);
1539 prev_state = state = STATE_SENDING_STOP;
1541 send_stop_cmd(host, data);
1543 if (data->stop && !data->error) {
1544 /* stop command for open-ended transfer*/
1546 send_stop_abort(host, data);
1550 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
1551 prev_state,state,mmc_hostname(host->mmc));
1553 case STATE_SENDING_STOP:
1554 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1555 &host->pending_events))
1557 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
1558 prev_state,state,mmc_hostname(host->mmc));
1560 /* CMD error in data command */
1561 if (host->mrq->cmd->error && host->mrq->data) {
1562 sg_miter_stop(&host->sg_miter);
1564 ctrl = mci_readl(host, CTRL);
1565 ctrl |= SDMMC_CTRL_FIFO_RESET;
1566 mci_writel(host, CTRL, ctrl);
1572 dw_mci_command_complete(host, host->mrq->stop);
1574 if (host->mrq->stop)
1575 dw_mci_command_complete(host, host->mrq->stop);
1577 host->cmd_status = 0;
1580 dw_mci_request_end(host, host->mrq);
/* Data errored mid-transfer: wait for XFER done, then drain via BUSY */
1583 case STATE_DATA_ERROR:
1584 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1585 &host->pending_events))
1588 state = STATE_DATA_BUSY;
1591 } while (state != prev_state);
1593 host->state = state;
1595 spin_unlock(&host->lock);
1599 /* push final bytes to part_buf, only use during push */
/* Stash the < FIFO-word-sized tail of a push in host->part_buf. */
1600 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1602 memcpy((void *)&host->part_buf, buf, cnt);
1603 host->part_buf_count = cnt;
1606 /* append bytes to part_buf, only use during push */
/*
 * Top up part_buf toward one full FIFO word (1 << data_shift bytes);
 * returns the number of bytes actually consumed from buf.
 */
1607 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1609 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1610 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1611 host->part_buf_count += cnt;
1615 /* pull first bytes from part_buf, only use during pull */
/* Drain leftover bytes from part_buf into buf; returns bytes copied. */
1616 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1618 cnt = min(cnt, (int)host->part_buf_count);
1620 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1622 host->part_buf_count -= cnt;
1623 host->part_buf_start += cnt;
1628 /* pull final bytes from the part_buf, assuming it's just been filled */
/* Copy cnt bytes out and record the remainder still held in part_buf. */
1629 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1631 memcpy(buf, &host->part_buf, cnt);
1632 host->part_buf_start = cnt;
1633 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO push for a 16-bit data register: flush any partial word first, handle
 * unaligned buffers via a bounce array, stream full u16 words, and park the
 * trailing odd byte in part_buf (flushed early if it ends the transfer).
 */
1636 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1638 struct mmc_data *data = host->data;
1641 /* try and push anything in the part_buf */
1642 if (unlikely(host->part_buf_count)) {
1643 int len = dw_mci_push_part_bytes(host, buf, cnt);
1646 if (host->part_buf_count == 2) {
1647 mci_writew(host, DATA(host->data_offset),
1649 host->part_buf_count = 0;
1652 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1653 if (unlikely((unsigned long)buf & 0x1)) {
1655 u16 aligned_buf[64];
1656 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1657 int items = len >> 1;
1659 /* memcpy from input buffer into aligned buffer */
1660 memcpy(aligned_buf, buf, len);
1663 /* push data from aligned buffer into fifo */
1664 for (i = 0; i < items; ++i)
1665 mci_writew(host, DATA(host->data_offset),
/* Aligned fast path: write u16s straight from the caller's buffer */
1672 for (; cnt >= 2; cnt -= 2)
1673 mci_writew(host, DATA(host->data_offset), *pdata++);
1676 /* put anything remaining in the part_buf */
1678 dw_mci_set_part_bytes(host, buf, cnt);
1679 /* Push data if we have reached the expected data length */
1680 if ((data->bytes_xfered + init_cnt) ==
1681 (data->blksz * data->blocks))
1682 mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit data register: bounce through an aligned array for
 * odd buffers, stream full u16 words otherwise, and keep a trailing odd
 * byte in part_buf16 for the next call.
 */
1687 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1689 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1690 if (unlikely((unsigned long)buf & 0x1)) {
1692 /* pull data from fifo into aligned buffer */
1693 u16 aligned_buf[64];
1694 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1695 int items = len >> 1;
1697 for (i = 0; i < items; ++i)
1698 aligned_buf[i] = mci_readw(host,
1699 DATA(host->data_offset));
1700 /* memcpy from aligned buffer into output buffer */
1701 memcpy(buf, aligned_buf, len);
/* Aligned fast path */
1709 for (; cnt >= 2; cnt -= 2)
1710 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Read one more word and hand out only the bytes still needed */
1714 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1715 dw_mci_pull_final_bytes(host, buf, cnt);
/* 32-bit-register variant of dw_mci_push_data16 (same staged algorithm). */
1719 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1721 struct mmc_data *data = host->data;
1724 /* try and push anything in the part_buf */
1725 if (unlikely(host->part_buf_count)) {
1726 int len = dw_mci_push_part_bytes(host, buf, cnt);
1729 if (host->part_buf_count == 4) {
1730 mci_writel(host, DATA(host->data_offset),
1732 host->part_buf_count = 0;
1735 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1736 if (unlikely((unsigned long)buf & 0x3)) {
1738 u32 aligned_buf[32];
1739 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1740 int items = len >> 2;
1742 /* memcpy from input buffer into aligned buffer */
1743 memcpy(aligned_buf, buf, len);
1746 /* push data from aligned buffer into fifo */
1747 for (i = 0; i < items; ++i)
1748 mci_writel(host, DATA(host->data_offset),
/* Aligned fast path: write u32s straight from the caller's buffer */
1755 for (; cnt >= 4; cnt -= 4)
1756 mci_writel(host, DATA(host->data_offset), *pdata++);
1759 /* put anything remaining in the part_buf */
1761 dw_mci_set_part_bytes(host, buf, cnt);
1762 /* Push data if we have reached the expected data length */
1763 if ((data->bytes_xfered + init_cnt) ==
1764 (data->blksz * data->blocks))
1765 mci_writel(host, DATA(host->data_offset),
/* 32-bit-register variant of dw_mci_pull_data16 (same staged algorithm). */
1770 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1772 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1773 if (unlikely((unsigned long)buf & 0x3)) {
1775 /* pull data from fifo into aligned buffer */
1776 u32 aligned_buf[32];
1777 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1778 int items = len >> 2;
1780 for (i = 0; i < items; ++i)
1781 aligned_buf[i] = mci_readl(host,
1782 DATA(host->data_offset));
1783 /* memcpy from aligned buffer into output buffer */
1784 memcpy(buf, aligned_buf, len);
/* Aligned fast path */
1792 for (; cnt >= 4; cnt -= 4)
1793 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Read one more word and hand out only the bytes still needed */
1797 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1798 dw_mci_pull_final_bytes(host, buf, cnt);
/* 64-bit-register variant of dw_mci_push_data16 (same staged algorithm). */
1802 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1804 struct mmc_data *data = host->data;
1807 /* try and push anything in the part_buf */
1808 if (unlikely(host->part_buf_count)) {
1809 int len = dw_mci_push_part_bytes(host, buf, cnt);
1813 if (host->part_buf_count == 8) {
1814 mci_writeq(host, DATA(host->data_offset),
1816 host->part_buf_count = 0;
1819 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1820 if (unlikely((unsigned long)buf & 0x7)) {
1822 u64 aligned_buf[16];
1823 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1824 int items = len >> 3;
1826 /* memcpy from input buffer into aligned buffer */
1827 memcpy(aligned_buf, buf, len);
1830 /* push data from aligned buffer into fifo */
1831 for (i = 0; i < items; ++i)
1832 mci_writeq(host, DATA(host->data_offset),
/* Aligned fast path: write u64s straight from the caller's buffer */
1839 for (; cnt >= 8; cnt -= 8)
1840 mci_writeq(host, DATA(host->data_offset), *pdata++);
1843 /* put anything remaining in the part_buf */
1845 dw_mci_set_part_bytes(host, buf, cnt);
1846 /* Push data if we have reached the expected data length */
1847 if ((data->bytes_xfered + init_cnt) ==
1848 (data->blksz * data->blocks))
1849 mci_writeq(host, DATA(host->data_offset),
/* 64-bit-register variant of dw_mci_pull_data16 (same staged algorithm). */
1854 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1856 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1857 if (unlikely((unsigned long)buf & 0x7)) {
1859 /* pull data from fifo into aligned buffer */
1860 u64 aligned_buf[16];
1861 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1862 int items = len >> 3;
1864 for (i = 0; i < items; ++i)
1865 aligned_buf[i] = mci_readq(host,
1866 DATA(host->data_offset));
1867 /* memcpy from aligned buffer into output buffer */
1868 memcpy(buf, aligned_buf, len);
/* Aligned fast path */
1876 for (; cnt >= 8; cnt -= 8)
1877 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* Read one more word and hand out only the bytes still needed */
1881 host->part_buf = mci_readq(host, DATA(host->data_offset));
1882 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Generic pull entry point: satisfy the request from part_buf leftovers
 * first, then delegate the remainder to the width-specific ->pull_data op.
 */
1886 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1890 /* get remaining partial bytes */
1891 len = dw_mci_pull_part_bytes(host, buf, cnt);
1892 if (unlikely(len == cnt))
1897 /* get the rest of the data */
1898 host->pull_data(host, buf, cnt);
/*
 * PIO read path: walk the request's scatterlist with sg_miter, draining the
 * FIFO into each segment while RXDR keeps asserting (or, on DTO, while the
 * FIFO still holds data).  Marks EVENT_XFER_COMPLETE when the sg list is
 * exhausted.
 */
1901 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1903 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1905 unsigned int offset;
1906 struct mmc_data *data = host->data;
1907 int shift = host->data_shift;
1910 unsigned int remain, fcnt;
1913 if (!sg_miter_next(sg_miter))
1916 host->sg = sg_miter->piter.sg;
1917 buf = sg_miter->addr;
1918 remain = sg_miter->length;
/* Bytes available = FIFO fill count (in words) plus part_buf leftovers */
1922 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1923 << shift) + host->part_buf_count;
1924 len = min(remain, fcnt);
1927 dw_mci_pull_data(host, (void *)(buf + offset), len);
1928 data->bytes_xfered += len;
1933 sg_miter->consumed = offset;
1934 status = mci_readl(host, MINTSTS);
1935 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1936 /* if the RXDR is ready read again */
1937 } while ((status & SDMMC_INT_RXDR) ||
1938 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
1941 if (!sg_miter_next(sg_miter))
1943 sg_miter->consumed = 0;
1945 sg_miter_stop(sg_miter);
1949 sg_miter_stop(sg_miter);
1952 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO write path: mirror of dw_mci_read_data_pio - fills the FIFO from the
 * scatterlist while TXDR keeps asserting, then flags EVENT_XFER_COMPLETE
 * once the sg list is exhausted.
 */
1955 static void dw_mci_write_data_pio(struct dw_mci *host)
1957 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1959 unsigned int offset;
1960 struct mmc_data *data = host->data;
1961 int shift = host->data_shift;
1964 unsigned int fifo_depth = host->fifo_depth;
1965 unsigned int remain, fcnt;
1968 if (!sg_miter_next(sg_miter))
1971 host->sg = sg_miter->piter.sg;
1972 buf = sg_miter->addr;
1973 remain = sg_miter->length;
/* Free space = (depth - fill count) in words, minus bytes parked in part_buf */
1977 fcnt = ((fifo_depth -
1978 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1979 << shift) - host->part_buf_count;
1980 len = min(remain, fcnt);
1983 host->push_data(host, (void *)(buf + offset), len);
1984 data->bytes_xfered += len;
1989 sg_miter->consumed = offset;
1990 status = mci_readl(host, MINTSTS);
1991 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1992 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1995 if (!sg_miter_next(sg_miter))
1997 sg_miter->consumed = 0;
1999 sg_miter_stop(sg_miter);
2003 sg_miter_stop(sg_miter);
2006 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * CMD_DONE IRQ helper: latch the raw status (first writer wins), flag
 * EVENT_CMD_COMPLETE and kick the tasklet state machine.
 */
2009 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2011 if (!host->cmd_status)
2012 host->cmd_status = status;
2016 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2017 tasklet_schedule(&host->tasklet);
/*
 * Top-half IRQ handler: reads MINTSTS, acks each handled bit in RINTSTS,
 * latches statuses into host fields, sets pending events and defers real
 * work to the tasklet.  Also services PIO drain/fill, card-detect (via the
 * card workqueue), per-slot SDIO interrupts and (optionally) IDMAC bits.
 */
2020 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2022 struct dw_mci *host = dev_id;
2023 u32 pending, sdio_int;
2026 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2029 * DTO fix - version 2.10a and below, and only if internal DMA
2032 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2034 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2035 pending |= SDMMC_INT_DATA_OVER;
2039 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2040 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2041 host->cmd_status = pending;
2043 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2046 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2047 /* if there is an error report DATA_ERROR */
2048 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2049 host->data_status = pending;
2051 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2052 tasklet_schedule(&host->tasklet);
2055 if (pending & SDMMC_INT_DATA_OVER) {
2056 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2057 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2058 if (!host->data_status)
2059 host->data_status = pending;
/* On DTO in receive direction, drain whatever is left in the FIFO */
2061 if (host->dir_status == DW_MCI_RECV_STATUS) {
2062 if (host->sg != NULL)
2063 dw_mci_read_data_pio(host, true);
2065 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2066 tasklet_schedule(&host->tasklet);
2069 if (pending & SDMMC_INT_RXDR) {
2070 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2071 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2072 dw_mci_read_data_pio(host, false);
2075 if (pending & SDMMC_INT_TXDR) {
2076 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2077 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2078 dw_mci_write_data_pio(host);
2081 if (pending & SDMMC_INT_CMD_DONE) {
2082 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2083 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2084 dw_mci_cmd_interrupt(host, pending);
/* Card insert/remove: debounce and handle in process context */
2087 if (pending & SDMMC_INT_CD) {
2088 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2089 queue_work(host->card_workqueue, &host->card_work);
2092 /* Handle SDIO Interrupts */
2093 for (i = 0; i < host->num_slots; i++) {
2094 struct dw_mci_slot *slot = host->slot[i];
/* IP >= 2.40a moved the per-slot SDIO interrupt bits up by 8 */
2096 if (host->verid < DW_MMC_240A)
2097 sdio_int = SDMMC_INT_SDIO(i);
2099 sdio_int = SDMMC_INT_SDIO(i + 8);
2101 if (pending & sdio_int) {
2102 mci_writel(host, RINTSTS, sdio_int);
2103 mmc_signal_sdio_irq(slot->mmc);
2109 #ifdef CONFIG_MMC_DW_IDMAC
2110 /* Handle DMA interrupts */
2111 pending = mci_readl(host, IDSTS);
2112 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2113 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2114 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2115 host->dma_ops->complete(host);
/*
 * Card-detect workqueue handler: for each slot, compare current presence
 * against last_detect_state; on a change, fail any in-flight or queued
 * request with -ENOMEDIUM (per current state-machine phase), reset the
 * FIFO/IDMAC on removal, and finally notify the core via
 * mmc_detect_change().
 * NOTE(review): several lines are missing from this listing (loop bodies,
 * else arms); the exact teardown sequencing cannot be fully confirmed here.
 */
2122 static void dw_mci_work_routine_card(struct work_struct *work)
2124 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2127 for (i = 0; i < host->num_slots; i++) {
2128 struct dw_mci_slot *slot = host->slot[i];
2129 struct mmc_host *mmc = slot->mmc;
2130 struct mmc_request *mrq;
2134 present = dw_mci_get_cd(mmc);
/* Re-check in a loop in case presence flips while we handle it */
2135 while (present != slot->last_detect_state) {
2136 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2137 present ? "inserted" : "removed");
2138 MMC_DBG_BOOT_FUNC(mmc, "card %s, devname=%s \n",
2139 present ? "inserted" : "removed", mmc_hostname(mmc));
2141 spin_lock_bh(&host->lock);
2143 /* Card change detected */
2144 slot->last_detect_state = present;
2146 /* Clean up queue if present */
2149 if (mrq == host->mrq) {
/* Request is currently being executed: fail it at its current phase */
2153 switch (host->state) {
2156 case STATE_SENDING_CMD:
2157 mrq->cmd->error = -ENOMEDIUM;
2161 case STATE_SENDING_DATA:
2162 mrq->data->error = -ENOMEDIUM;
2163 dw_mci_stop_dma(host);
2165 case STATE_DATA_BUSY:
2166 case STATE_DATA_ERROR:
2167 if (mrq->data->error == -EINPROGRESS)
2168 mrq->data->error = -ENOMEDIUM;
2172 case STATE_SENDING_STOP:
2173 mrq->stop->error = -ENOMEDIUM;
2177 dw_mci_request_end(host, mrq);
/* Request was still queued: fail every stage with -ENOMEDIUM */
2179 list_del(&slot->queue_node);
2180 mrq->cmd->error = -ENOMEDIUM;
2182 mrq->data->error = -ENOMEDIUM;
2184 mrq->stop->error = -ENOMEDIUM;
2186 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",mrq->cmd->opcode, mmc_hostname(mmc));
2188 spin_unlock(&host->lock);
2189 mmc_request_done(slot->mmc, mrq);
2190 spin_lock(&host->lock);
2194 /* Power down slot */
2198 * Clear down the FIFO - doing so generates a
2199 * block interrupt, hence setting the
2200 * scatter-gather pointer to NULL.
2202 sg_miter_stop(&host->sg_miter);
2205 ctrl = mci_readl(host, CTRL);
2206 ctrl |= SDMMC_CTRL_FIFO_RESET;
2207 mci_writel(host, CTRL, ctrl);
2209 #ifdef CONFIG_MMC_DW_IDMAC
2210 dw_mci_idmac_reset(host);
2215 spin_unlock_bh(&host->lock);
2217 present = dw_mci_get_cd(mmc);
2220 mmc_detect_change(slot->mmc,
2221 msecs_to_jiffies(host->pdata->detect_delay_ms));
2226 /* given a slot id, find out the device node representing that slot */
/* Match a child node whose "reg" property equals the slot index. */
2227 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2229 struct device_node *np;
2233 if (!dev || !dev->of_node)
2236 for_each_child_of_node(dev->of_node, np) {
2237 addr = of_get_property(np, "reg", &len);
2238 if (!addr || (len < sizeof(int)))
2240 if (be32_to_cpup(addr) == slot)
/* DT property name -> per-slot quirk flag translation table. */
2246 static struct dw_mci_of_slot_quirks {
2249 } of_slot_quirks[] = {
2251 .quirk = "disable-wp",
2252 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/* Collect quirk flags for a slot by scanning of_slot_quirks against its DT node. */
2256 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2258 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2263 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2264 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2265 quirks |= of_slot_quirks[idx].id;
2270 /* find out bus-width for a given slot */
/* Reads "bus-width" from the controller node (not per-slot - see comment). */
2271 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2273 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
2279 if (of_property_read_u32(np, "bus-width", &bus_wd))
2280 dev_err(dev, "bus-width property not found, assuming width"
2286 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Look up "pwr-gpios" on the controller node, request it, and drive it low
 * (power enable is active-low here per the trailing comment).
 */
2287 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
2289 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
2295 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
2297 /* Having a missing entry is valid; return silently */
2298 if (!gpio_is_valid(gpio))
2301 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
2302 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2306 gpio_direction_output(gpio, 0);//set 0 to pwr-en
2312 /* find the write protect gpio for a given slot; or -1 if none specified */
/* Look up and claim the per-slot "wp-gpios" write-protect line. */
2313 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2315 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2321 gpio = of_get_named_gpio(np, "wp-gpios", 0);
2323 /* Having a missing entry is valid; return silently */
2324 if (!gpio_is_valid(gpio))
2327 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2328 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2335 /* find the cd gpio for a given slot */
/* Register the per-slot "cd-gpios" card-detect line with the slot-gpio core. */
2336 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2337 struct mmc_host *mmc)
2339 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2345 gpio = of_get_named_gpio(np, "cd-gpios", 0);
2347 /* Having a missing entry is valid; return silently */
2348 if (!gpio_is_valid(gpio))
2351 if (mmc_gpio_request_cd(mmc, gpio, 0))
2352 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2354 #else /* CONFIG_OF */
/* No-op fallbacks so callers compile identically without device tree. */
2355 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2359 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2363 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2367 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2371 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2372 struct mmc_host *mmc)
2376 #endif /* CONFIG_OF */
/*
 * Allocate and register one mmc_host for slot `id`: set frequency limits,
 * OCR mask, caps (from pdata, drv_data and DT properties), bus width,
 * block-layer limits, power/WP/CD GPIOs and the vmmc regulator, then
 * mmc_add_host().  Returns 0 on success.
 * NOTE(review): listing omits error-path lines; cleanup ordering on failure
 * cannot be confirmed from this view.
 */
2378 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2380 struct mmc_host *mmc;
2381 struct dw_mci_slot *slot;
2382 const struct dw_mci_drv_data *drv_data = host->drv_data;
2387 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2391 slot = mmc_priv(mmc);
2395 host->slot[id] = slot;
2398 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2400 mmc->ops = &dw_mci_ops;
/* Default f_min from max clock divider (2 * 255 = 510), f_max = bus clock */
2402 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
2403 mmc->f_max = host->bus_hz;
2404 printk("%d..%s: fmin=%d, fmax=%d, bus_hz=%d \n", __LINE__,__FUNCTION__,mmc->f_min, mmc->f_max, host->bus_hz);
/* DT "clock-freq-min-max" overrides; otherwise fall back to driver limits */
2406 if (of_property_read_u32_array(host->dev->of_node,
2407 "clock-freq-min-max", freq, 2)) {
2408 mmc->f_min = DW_MCI_FREQ_MIN;
2409 mmc->f_max = DW_MCI_FREQ_MAX;
2411 printk("%d..%s: fmin=%d, fmax=%d \n", __LINE__,__FUNCTION__,mmc->f_min, mmc->f_max);
2413 mmc->f_min = freq[0];
2414 mmc->f_max = freq[1];
2416 printk("%d..%s: fmin=%d, fmax=%d \n", __LINE__,__FUNCTION__,mmc->f_min, mmc->f_max);
/* Rockchip extension: restrict which card type this slot may host */
2420 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
2421 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
2422 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
2423 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
2424 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
2425 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
2427 if (host->pdata->get_ocr)
2428 mmc->ocr_avail = host->pdata->get_ocr(id);
2430 //mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
/* Advertise a wide voltage window (2.7V-3.6V plus low-voltage ranges) */
2431 mmc->ocr_avail = MMC_VDD_27_28|MMC_VDD_28_29|MMC_VDD_29_30|MMC_VDD_30_31
2432 | MMC_VDD_31_32|MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35| MMC_VDD_35_36;
2434 mmc->ocr_avail |= MMC_VDD_26_27 |MMC_VDD_25_26 |MMC_VDD_24_25 |MMC_VDD_23_24
2435 |MMC_VDD_22_23 |MMC_VDD_21_22 |MMC_VDD_20_21 |MMC_VDD_165_195;
2439 * Start with slot power disabled, it will be enabled when a card
2442 if (host->pdata->setpower)
2443 host->pdata->setpower(id, 0);
2445 if (host->pdata->caps)
2446 mmc->caps = host->pdata->caps;
2448 if (host->pdata->pm_caps)
2449 mmc->pm_caps = host->pdata->pm_caps;
2451 if (host->dev->of_node) {
2452 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2456 ctrl_id = to_platform_device(host->dev)->id;
2458 if (drv_data && drv_data->caps)
2459 mmc->caps |= drv_data->caps[ctrl_id];
2460 if (drv_data && drv_data->hold_reg_flag)
2461 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
2463 if (host->pdata->caps2)
2464 mmc->caps2 = host->pdata->caps2;
2466 if (host->pdata->get_bus_wd)
2467 bus_width = host->pdata->get_bus_wd(slot->id);
2468 else if (host->dev->of_node)
2469 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2473 switch (bus_width) {
2475 mmc->caps |= MMC_CAP_8_BIT_DATA;
2477 mmc->caps |= MMC_CAP_4_BIT_DATA;
/* Translate optional DT capability properties into core cap flags */
2479 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
2480 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
2481 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
2482 mmc->caps |= MMC_CAP_SDIO_IRQ;
2483 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
2484 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
2485 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
2486 mmc->pm_caps |= MMC_PM_KEEP_POWER;
2487 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
2488 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2490 if (host->pdata->blk_settings) {
2491 mmc->max_segs = host->pdata->blk_settings->max_segs;
2492 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2493 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2494 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2495 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2497 /* Useful defaults if platform data is unset. */
2498 #ifdef CONFIG_MMC_DW_IDMAC
2499 mmc->max_segs = host->ring_size;
2500 mmc->max_blk_size = 65536;
2501 mmc->max_blk_count = host->ring_size;
2502 mmc->max_seg_size = 0x1000;
2503 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2506 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2507 mmc->max_blk_count = 512;
2508 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2509 mmc->max_seg_size = mmc->max_req_size;
2510 #endif /* CONFIG_MMC_DW_IDMAC */
2513 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
2515 if (gpio_is_valid(slot->pwr_en_gpio))
/* Regulator is optional: missing vmmc is informational, not fatal */
2520 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
2521 if (IS_ERR(host->vmmc)) {
2522 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
2525 ret = regulator_enable(host->vmmc);
2528 "failed to enable regulator: %d\n", ret);
2533 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2534 dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
2536 ret = mmc_add_host(mmc);
2540 #if defined(CONFIG_DEBUG_FS)
2541 dw_mci_init_debugfs(slot);
2544 /* Card initially undetected */
2545 slot->last_detect_state = 0;
/*
 * Tear down one slot: run the platform exit hook, then unregister and free
 * the mmc_host (reverse of dw_mci_init_slot).
 */
2554 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2556 /* Shutdown detect IRQ */
2557 if (slot->host->pdata->exit)
2558 slot->host->pdata->exit(id);
2560 /* Debugfs stuff is cleaned up by mmc core */
2561 mmc_remove_host(slot->mmc);
2562 slot->host->slot[id] = NULL;
2563 mmc_free_host(slot->mmc);
/*
 * Set up the DMA backend: allocate one page of coherent memory for the
 * descriptor/sg translation area, select the IDMAC ops when configured,
 * validate the ops table and initialize it - falling back to PIO mode on
 * any failure (managed allocation, nothing to free explicitly).
 */
2566 static void dw_mci_init_dma(struct dw_mci *host)
2568 /* Alloc memory for sg translation */
2569 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2570 &host->sg_dma, GFP_KERNEL);
2571 if (!host->sg_cpu) {
2572 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2577 /* Determine which DMA interface to use */
2578 #ifdef CONFIG_MMC_DW_IDMAC
2579 host->dma_ops = &dw_mci_idmac_ops;
2580 dev_info(host->dev, "Using internal DMA controller.\n");
/* All four ops must be present for the backend to be usable */
2586 if (host->dma_ops->init && host->dma_ops->start &&
2587 host->dma_ops->stop && host->dma_ops->cleanup) {
2588 if (host->dma_ops->init(host)) {
2589 dev_err(host->dev, "%s: Unable to initialize "
2590 "DMA Controller.\n", __func__);
2594 dev_err(host->dev, "DMA initialization not found.\n");
2602 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Assert the controller/FIFO/DMA self-clearing reset bits and poll (up to
 * 500 ms) for the hardware to clear them; returns false on timeout.
 */
2607 static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
2609 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2612 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2613 SDMMC_CTRL_DMA_RESET));
2615 /* wait till resets clear */
2617 ctrl = mci_readl(host, CTRL);
2618 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2619 SDMMC_CTRL_DMA_RESET)))
2621 } while (time_before(jiffies, timeout));
2623 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
/* DT property name -> host-level quirk flag translation table. */
2629 static struct dw_mci_of_quirks {
2634 .quirk = "broken-cd",
2635 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board from device-tree properties: slot count, quirks,
 * fifo depth, card-detect delay, bus clock, PM/caps flags, plus the
 * variant driver's own parse_dt hook.  Returns the pdata or an ERR_PTR.
 */
2639 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2641 struct dw_mci_board *pdata;
2642 struct device *dev = host->dev;
2643 struct device_node *np = dev->of_node;
2644 const struct dw_mci_drv_data *drv_data = host->drv_data;
2646 u32 clock_frequency;
2648 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2650 dev_err(dev, "could not allocate memory for pdata\n");
2651 return ERR_PTR(-ENOMEM);
2654 /* find out number of slots supported */
2655 if (of_property_read_u32(dev->of_node, "num-slots",
2656 &pdata->num_slots)) {
2657 dev_info(dev, "num-slots property not found, "
2658 "assuming 1 slot is available\n");
2659 pdata->num_slots = 1;
2663 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2664 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2665 pdata->quirks |= of_quirks[idx].id;
2667 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2668 dev_info(dev, "fifo-depth property not found, using "
2669 "value of FIFOTH register as default\n");
2671 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2673 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2674 pdata->bus_hz = clock_frequency;
/* Give the SoC-variant driver a chance to parse its own properties */
2676 if (drv_data && drv_data->parse_dt) {
2677 ret = drv_data->parse_dt(host);
2679 return ERR_PTR(ret);
2682 if (of_find_property(np, "keep-power-in-suspend", NULL))
2683 pdata->pm_caps |= MMC_PM_KEEP_POWER;
2687 if (of_find_property(np, "enable-sdio-wakeup", NULL))
2688 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2690 if (of_find_property(np, "supports-highspeed", NULL))
2691 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2693 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
2694 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2696 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
2697 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2699 if (of_get_property(np, "cd-inverted", NULL))
2700 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
2701 if (of_get_property(np, "bootpart-no-access", NULL))
2702 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
2707 #else /* CONFIG_OF */
/* Non-OF build: device tree is unavailable, so DT parsing always fails. */
2708 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2710 return ERR_PTR(-EINVAL);
2712 #endif /* CONFIG_OF */
/*
 * dw_mci_probe - bring up one DesignWare MMC controller instance.
 *
 * Sequence visible in this extract: obtain platform data (DT parse),
 * acquire/enable the "biu" (bus interface) and "ciu" (card interface)
 * clocks, run variant init/setup_clock hooks, determine host data width
 * from HCON, reset the block, set up DMA, program interrupt masks,
 * timeout and FIFO thresholds, detect the IP version to pick the DATA
 * register offset, create the card-detect workqueue, request the IRQ,
 * and initialize each slot (at least one must succeed).
 *
 * Returns 0 on success; on failure unwinds the workqueue, DMA, regulator
 * and clocks via the error labels at the bottom (labels elided in this
 * extract) and returns a negative errno.
 */
2714 int dw_mci_probe(struct dw_mci *host)
2716 const struct dw_mci_drv_data *drv_data = host->drv_data;
2717 int width, i, ret = 0;
2722 host->pdata = dw_mci_parse_dt(host);
2723 if (IS_ERR(host->pdata)) {
2724 dev_err(host->dev, "platform data not available\n");
/* select_slot is mandatory when more than one slot shares the host. */
2729 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
2731 "Platform data must supply select_slot function\n");
2735 host->biu_clk = devm_clk_get(host->dev, "biu");
2736 if (IS_ERR(host->biu_clk)) {
2737 dev_dbg(host->dev, "biu clock not available\n")/* optional clock: absence is not fatal */;
2739 ret = clk_prepare_enable(host->biu_clk);
2741 dev_err(host->dev, "failed to enable biu clock\n");
2746 host->ciu_clk = devm_clk_get(host->dev, "ciu");
2747 if (IS_ERR(host->ciu_clk)) {
2748 dev_dbg(host->dev, "ciu clock not available\n");
2749 host->bus_hz = host->pdata->bus_hz/* no ciu clock: fall back to DT/pdata rate */;
2751 ret = clk_prepare_enable(host->ciu_clk);
2753 dev_err(host->dev, "failed to enable ciu clock\n");
2759 // NOTE(review): debug override ("test, modify by xbw") forcing bus_hz to 50 MHz; it masks the pdata/clk-derived rate — TODO confirm intent and remove
2760 host->bus_hz = 50000000;
2762 if (drv_data && drv_data->init) {
2763 ret = drv_data->init(host);
2766 "implementation specific init failed\n");
2769 host->bus_hz = clk_get_rate(host->ciu_clk);
2771 if (drv_data && drv_data->setup_clock) {
2772 ret = drv_data->setup_clock(host);
2775 "implementation specific clock setup failed\n");
2780 if (!host->bus_hz) {
2782 "Platform data must supply bus speed\n");
2787 host->quirks = host->pdata->quirks;
2789 spin_lock_init(&host->lock);
2790 INIT_LIST_HEAD(&host->queue);
2793 * Get the host data width - this assumes that HCON has been set with
2794 * the correct values.
2796 i = (mci_readl(host, HCON) >> 7) & 0x7;
2798 host->push_data = dw_mci_push_data16;
2799 host->pull_data = dw_mci_pull_data16;
2801 host->data_shift = 1;
2802 } else if (i == 2) {
2803 host->push_data = dw_mci_push_data64;
2804 host->pull_data = dw_mci_pull_data64;
2806 host->data_shift = 3;
2808 /* Check for a reserved value, and warn if it is */
2810 "HCON reports a reserved host data width!\n"
2811 "Defaulting to 32-bit access.\n");
2812 host->push_data = dw_mci_push_data32;
2813 host->pull_data = dw_mci_pull_data32;
2815 host->data_shift = 2;
2818 /* Reset all blocks */
2819 if (!mci_wait_reset(host->dev, host))
2822 host->dma_ops = host->pdata->dma_ops;
2823 dw_mci_init_dma(host);
2825 /* Clear the interrupts for the host controller */
2826 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2827 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2829 /* Put in max timeout */
2830 mci_writel(host, TMOUT, 0xFFFFFFFF);
2833 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2834 * Tx Mark = fifo_size / 2 DMA Size = 8
2836 if (!host->pdata->fifo_depth) {
2838 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2839 * have been overwritten by the bootloader, just like we're
2840 * about to do, so if you know the value for your hardware, you
2841 * should put it in the platform data.
2843 fifo_size = mci_readl(host, FIFOTH);
2844 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2846 fifo_size = host->pdata->fifo_depth;
2848 host->fifo_depth = fifo_size;
2850 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2851 mci_writel(host, FIFOTH, host->fifoth_val);
2853 /* disable clock to CIU */
2854 mci_writel(host, CLKENA, 0);
2855 mci_writel(host, CLKSRC, 0);
2858 * In 2.40a spec, Data offset is changed.
2859 * Need to check the version-id and set data-offset for DATA register.
2861 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2862 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2864 if (host->verid < DW_MMC_240A)
2865 host->data_offset = DATA_OFFSET;
2867 host->data_offset = DATA_240A_OFFSET;
2869 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2870 host->card_workqueue = alloc_workqueue("dw-mci-card",
2871 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1)/* single-threaded ordered card-detect work */;
2872 if (!host->card_workqueue) {
2876 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2877 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2878 host->irq_flags, "dw-mci", host);
2882 if (host->pdata->num_slots)
2883 host->num_slots = host->pdata->num_slots;
2885 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1/* fall back to slot count encoded in HCON */;
2888 * Enable interrupts for command done, data over, data empty, card det,
2889 * receive ready and error such as transmit, receive timeout, crc error
2891 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2892 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2893 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2894 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2895 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2897 dev_info(host->dev, "DW MMC controller at irq %d, "
2898 "%d bit host data width, "
2900 host->irq, width, fifo_size);
2902 /* We need at least one slot to succeed */
2903 for (i = 0; i < host->num_slots; i++) {
2904 ret = dw_mci_init_slot(host, i);
2906 dev_dbg(host->dev, "slot %d init failed\n", i);
2912 dev_info(host->dev, "%d slots initialized\n", init_slots);
2914 dev_dbg(host->dev, "attempted to initialize %d slots, "
2915 "but failed on all\n", host->num_slots);
2920 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2921 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* Error unwind path: release resources in reverse acquisition order. */
2926 destroy_workqueue(host->card_workqueue);
2929 if (host->use_dma && host->dma_ops->exit)
2930 host->dma_ops->exit(host);
2933 regulator_disable(host->vmmc);
2936 if (!IS_ERR(host->ciu_clk))
2937 clk_disable_unprepare(host->ciu_clk);
2940 if (!IS_ERR(host->biu_clk))
2941 clk_disable_unprepare(host->biu_clk);
2945 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - tear down a controller instance: mask/clear interrupts,
 * clean up every slot, gate the CIU clock, and release the workqueue,
 * DMA resources, regulator and clocks (reverse of dw_mci_probe()).
 */
2947 void dw_mci_remove(struct dw_mci *host)
/* Quiesce the hardware before dismantling software state. */
2951 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2952 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2954 for (i = 0; i < host->num_slots; i++) {
2955 dev_dbg(host->dev, "remove slot %d\n", i);
2957 dw_mci_cleanup_slot(host->slot[i], i);
2960 /* disable clock to CIU */
2961 mci_writel(host, CLKENA, 0);
2962 mci_writel(host, CLKSRC, 0);
2964 destroy_workqueue(host->card_workqueue);
2966 if (host->use_dma && host->dma_ops->exit)
2967 host->dma_ops->exit(host);
2970 regulator_disable(host->vmmc);
2972 if (!IS_ERR(host->ciu_clk))
2973 clk_disable_unprepare(host->ciu_clk);
2975 if (!IS_ERR(host->biu_clk))
2976 clk_disable_unprepare(host->biu_clk);
2978 EXPORT_SYMBOL(dw_mci_remove);
2982 #ifdef CONFIG_PM_SLEEP
2984 * TODO: we should probably disable the clock to the card in the suspend path.
/*
 * dw_mci_suspend - suspend every initialized slot's mmc host.  On a slot
 * suspend failure the loop appears to resume the already-suspended slots
 * before erroring out (unwind partially elided in this extract), then the
 * card regulator is disabled.
 */
2986 int dw_mci_suspend(struct dw_mci *host)
2990 for (i = 0; i < host->num_slots; i++) {
2991 struct dw_mci_slot *slot = host->slot[i];
2994 ret = mmc_suspend_host(slot->mmc);
/* Failure path: walk back and resume the slots suspended so far. */
2997 slot = host->slot[i];
2999 mmc_resume_host(host->slot[i]->mmc);
3006 regulator_disable(host->vmmc);
3010 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - re-power and re-program the controller after suspend:
 * enable the vmmc regulator, reset the block, re-init DMA, restore
 * FIFOTH/TMOUT/interrupt-mask registers, and re-apply ios/bus setup for
 * slots that kept power (MMC_PM_KEEP_POWER) during suspend.
 */
3012 int dw_mci_resume(struct dw_mci *host)
3017 ret = regulator_enable(host->vmmc);
3020 "failed to enable regulator: %d\n", ret);
3025 if (!mci_wait_reset(host->dev, host)) {
3030 if (host->use_dma && host->dma_ops->init)
3031 host->dma_ops->init(host);
3034 * Restore the initial value at FIFOTH register
3035 * And Invalidate the prev_blksz with zero
3037 mci_writel(host, FIFOTH, host->fifoth_val);
3038 host->prev_blksz = 0/* forces the timeout/blksz programming to be redone on next request */;
3039 /* Put in max timeout */
3040 mci_writel(host, TMOUT, 0xFFFFFFFF);
/* Re-arm the same interrupt set that dw_mci_probe() enables. */
3042 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3043 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3044 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3045 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
3046 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3048 for (i = 0; i < host->num_slots; i++) {
3049 struct dw_mci_slot *slot = host->slot[i];
3052 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3053 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3054 dw_mci_setup_bus(slot, true);
3057 // NOTE(review): mmc_resume_host() call is commented out here; slots not using MMC_PM_KEEP_POWER may rely on core-managed resume — confirm against the mmc core version in use
3063 EXPORT_SYMBOL(dw_mci_resume);
3064 #endif /* CONFIG_PM_SLEEP */
/* Module entry point: just announces the driver (platform glue registers
 * the actual devices elsewhere). */
3066 static int __init dw_mci_init(void)
3068 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
/* Module exit point: nothing to undo here. */
3072 static void __exit dw_mci_exit(void)
3076 module_init(dw_mci_init);
3077 module_exit(dw_mci_exit);
/* Module metadata.  Fixed: the third MODULE_AUTHOR string contained the
 * mojibake sequence "£¬" (a mis-encoded fullwidth comma) and unbalanced
 * spacing around the email address; replaced with plain ASCII. */
3079 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3081 MODULE_AUTHOR("NXP Semiconductor VietNam");
3082 MODULE_AUTHOR("Imagination Technologies Ltd");
3083 MODULE_AUTHOR("Rockchip Electronics, Bangwang Xie <xbw@rock-chips.com>");
3085 MODULE_LICENSE("GPL v2");