2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
49 #include <linux/log2.h>
51 #include "rk_sdmmc_dbg.h"
52 #include <linux/regulator/rockchip_io_vol_domain.h>
53 #include "../../clk/rockchip/clk-ops.h"
/* Driver version string for this Rockchip-specific dw_mmc port. */
55 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
57 /* Common flag combinations */
/* NOTE(review): the closing lines of the two flag macros below were lost in
 * extraction — the parenthesized expressions are visibly unterminated here. */
58 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
59 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
61 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
63 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
64 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
/* Values stored in host->dir_status to record the transfer direction. */
65 #define DW_MCI_SEND_STATUS 1
66 #define DW_MCI_RECV_STATUS 2
/* Transfers smaller than this many bytes are done via PIO, not DMA. */
67 #define DW_MCI_DMA_THRESHOLD 16
/* Bus clock bounds in Hz; max lowered from 200 MHz to 50 MHz on this port. */
69 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
70 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
72 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
/* Per-card-type data timeouts, in milliseconds (consumed by
 * dw_mci_wait_unbusy via msecs_to_jiffies). */
73 #define SDMMC_DATA_TIMEOUT_SD 500
74 #define SDMMC_DATA_TIMEOUT_SDIO 250
75 #define SDMMC_DATA_TIMEOUT_EMMC 2500
/* Command response-timeout hold and busy-wait bounds, in milliseconds. */
77 #define SDMMC_CMD_RTO_MAX_HOLD 200
78 #define SDMMC_WAIT_FOR_UNBUSY 2500
80 #ifdef CONFIG_MMC_DW_IDMAC
/* All IDMAC interrupt-status bits, used to clear IDSTS in one write.
 * NOTE(review): the macro's final continuation line is missing from this
 * listing, as is the "struct idmac_desc {" header before des0 below. */
81 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
82 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
83 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
87 u32 des0; /* Control Descriptor */
/* des0 control/status bits for the internal DMA descriptor. */
88 #define IDMAC_DES0_DIC BIT(1)
89 #define IDMAC_DES0_LD BIT(2)
90 #define IDMAC_DES0_FD BIT(3)
91 #define IDMAC_DES0_CH BIT(4)
92 #define IDMAC_DES0_ER BIT(5)
93 #define IDMAC_DES0_CES BIT(30)
94 #define IDMAC_DES0_OWN BIT(31)
96 u32 des1; /* Buffer sizes */
/* Store a 13-bit buffer-1 length into des1, preserving the buffer-2 field. */
97 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
98 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
100 u32 des2; /* buffer 1 physical address */
102 u32 des3; /* buffer 2 physical address */
104 #endif /* CONFIG_MMC_DW_IDMAC */
/* Standard 64-byte tuning block pattern for 4-bit bus (per SD/eMMC specs).
 * NOTE(review): the closing "};" of this array is missing from this listing. */
106 static const u8 tuning_blk_pattern_4bit[] = {
107 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
108 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
109 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
110 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
111 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
112 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
113 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
114 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard 128-byte tuning block pattern for 8-bit bus (per eMMC spec).
 * NOTE(review): the closing "};" of this array is missing from this listing. */
117 static const u8 tuning_blk_pattern_8bit[] = {
118 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
119 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
120 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
121 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
122 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
123 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
124 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
125 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
126 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
127 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
128 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
129 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
130 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
131 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
132 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
133 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Forward declarations for reset and low-power helpers defined later. */
136 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
137 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
138 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
139 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
141 /*printk the all register of current host*/
/* Debug helper: dump every register named in the dw_mci_regs table via printk.
 * NOTE(review): the loop's regs++ advance, braces, and return statement are
 * missing from this listing — confirm against the full source. */
143 static int dw_mci_regs_printk(struct dw_mci *host)
145 struct sdmmc_reg *regs = dw_mci_regs;
147 while( regs->name != 0 ){
148 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
151 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
156 #if defined(CONFIG_DEBUG_FS)
/* debugfs "req" seq_file show: dump the slot's in-flight mmc_request
 * (command, data, stop) under the host lock for a consistent snapshot.
 * NOTE(review): the mrq NULL-check and field assignments between the lock
 * and the seq_printf calls are missing from this listing. */
157 static int dw_mci_req_show(struct seq_file *s, void *v)
159 struct dw_mci_slot *slot = s->private;
160 struct mmc_request *mrq;
161 struct mmc_command *cmd;
162 struct mmc_command *stop;
163 struct mmc_data *data;
165 /* Make sure we get a consistent snapshot */
166 spin_lock_bh(&slot->host->lock);
176 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
177 cmd->opcode, cmd->arg, cmd->flags,
178 cmd->resp[0], cmd->resp[1], cmd->resp[2],
/* BUG? resp[2] is printed twice; the fourth field was presumably meant
 * to be resp[3]. Same issue in the stop-command dump below. */
179 cmd->resp[2], cmd->error);
181 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
182 data->bytes_xfered, data->blocks,
183 data->blksz, data->flags, data->error);
186 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
187 stop->opcode, stop->arg, stop->flags,
188 stop->resp[0], stop->resp[1], stop->resp[2],
189 stop->resp[2], stop->error);
192 spin_unlock_bh(&slot->host->lock);
197 static int dw_mci_req_open(struct inode *inode, struct file *file)
199 return single_open(file, dw_mci_req_show, inode->i_private);
202 static const struct file_operations dw_mci_req_fops = {
203 .owner = THIS_MODULE,
204 .open = dw_mci_req_open,
207 .release = single_release,
/* debugfs "regs" seq_file show.
 * NOTE(review): these print the SDMMC_* register *offset constants*, not live
 * register contents (no mci_readl) — this matches the historical upstream
 * dw_mmc code, but confirm it is the intended behavior.
 * The "return 0;" and braces are missing from this listing. */
210 static int dw_mci_regs_show(struct seq_file *s, void *v)
212 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
213 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
214 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
215 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
216 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
217 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
222 static int dw_mci_regs_open(struct inode *inode, struct file *file)
224 return single_open(file, dw_mci_regs_show, inode->i_private);
227 static const struct file_operations dw_mci_regs_fops = {
228 .owner = THIS_MODULE,
229 .open = dw_mci_regs_open,
232 .release = single_release,
/* Create the per-slot debugfs tree under the mmc host's debugfs root:
 * "regs" and "req" files plus raw views of the driver state machine and
 * event bitmasks.
 * NOTE(review): the NULL-root early return, the error gotos after each
 * debugfs_create_* call, and the braces are missing from this listing. */
235 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
237 struct mmc_host *mmc = slot->mmc;
238 struct dw_mci *host = slot->host;
242 root = mmc->debugfs_root;
246 node = debugfs_create_file("regs", S_IRUSR, root, host,
251 node = debugfs_create_file("req", S_IRUSR, root, slot,
/* Expose the driver's FSM state and event bitmasks for debugging. */
256 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
260 node = debugfs_create_x32("pending_events", S_IRUSR, root,
261 (u32 *)&host->pending_events);
265 node = debugfs_create_x32("completed_events", S_IRUSR, root,
266 (u32 *)&host->completed_events);
/* Reached via the (missing) error path when any node creation failed. */
273 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
275 #endif /* defined(CONFIG_DEBUG_FS) */
277 static void dw_mci_set_timeout(struct dw_mci *host)
279 /* timeout (maximum) */
280 mci_writel(host, TMOUT, 0xffffffff);
/* Translate an mmc_command into the controller's CMD register flags:
 * stop/response/CRC bits, data-transfer bits, then a platform hook.
 * NOTE(review): the initial "u32 cmdr = cmd->opcode;", the data = cmd->data
 * assignment/NULL-check, and the final "return cmdr;" are missing from this
 * listing. */
283 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
285 struct mmc_data *data;
286 struct dw_mci_slot *slot = mmc_priv(mmc);
287 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
/* Mark in-flight; cleared with the real result at completion. */
289 cmd->error = -EINPROGRESS;
293 if (cmdr == MMC_STOP_TRANSMISSION)
294 cmdr |= SDMMC_CMD_STOP;
296 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
298 if (cmd->flags & MMC_RSP_PRESENT) {
299 /* We expect a response, so set this bit */
300 cmdr |= SDMMC_CMD_RESP_EXP;
301 if (cmd->flags & MMC_RSP_136)
302 cmdr |= SDMMC_CMD_RESP_LONG;
305 if (cmd->flags & MMC_RSP_CRC)
306 cmdr |= SDMMC_CMD_RESP_CRC;
/* Data phase: direction and stream/block mode bits. */
310 cmdr |= SDMMC_CMD_DAT_EXP;
311 if (data->flags & MMC_DATA_STREAM)
312 cmdr |= SDMMC_CMD_STRM_MODE;
313 if (data->flags & MMC_DATA_WRITE)
314 cmdr |= SDMMC_CMD_DAT_WR;
/* Let the SoC-specific driver tweak the final command word. */
317 if (drv_data && drv_data->prepare_command)
318 drv_data->prepare_command(slot->host, &cmdr);
/* Build the stop/abort command matching a data command: CMD12 for block
 * read/write, or an SDIO CCCR abort (CMD52 write to the abort register)
 * for CMD53 — and return its CMD register encoding.
 * NOTE(review): "u32 cmdr = cmd->opcode;", the stop->arg = 0 path, and the
 * final "return cmdr;" are missing from this listing. */
324 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
326 struct mmc_command *stop;
332 stop = &host->stop_abort;
334 memset(stop, 0, sizeof(struct mmc_command));
336 if (cmdr == MMC_READ_SINGLE_BLOCK ||
337 cmdr == MMC_READ_MULTIPLE_BLOCK ||
338 cmdr == MMC_WRITE_BLOCK ||
339 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
340 stop->opcode = MMC_STOP_TRANSMISSION;
342 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
343 } else if (cmdr == SD_IO_RW_EXTENDED) {
/* SDIO abort: R/W=1, function 0, CCCR abort register, with the
 * aborted function number taken from the original CMD53 argument. */
344 stop->opcode = SD_IO_RW_DIRECT;
345 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
346 ((cmd->arg >> 28) & 0x7);
347 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
352 cmdr = stop->opcode | SDMMC_CMD_STOP |
353 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/* Write the command argument and command word (with the START bit) to the
 * controller, with special handling for the CMD11 voltage switch and the
 * HOLD register quirk on some SoCs.
 * NOTE(review): several lines (host->cmd assignment, wmb(), the closing brace
 * of the voltage-switch branch) are missing from this listing. */
358 static void dw_mci_start_command(struct dw_mci *host,
359 struct mmc_command *cmd, u32 cmd_flags)
361 struct dw_mci_slot *slot = host->slot[0];
362 /*temporality fix slot[0] due to host->num_slots equal to 1*/
/* Remember the previous command for error diagnostics. */
364 host->pre_cmd = host->cmd;
367 "start command: ARGR=0x%08x CMDR=0x%08x\n",
368 cmd->arg, cmd_flags);
370 if(SD_SWITCH_VOLTAGE == cmd->opcode){
371 /*confirm non-low-power mode*/
372 mci_writel(host, CMDARG, 0);
373 dw_mci_disable_low_power(slot);
375 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
376 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
/* CMD11: tell the controller a voltage switch sequence follows. */
378 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
381 mci_writel(host, CMDARG, cmd->arg);
384 /* fix the value to 1 in some Soc,for example RK3188. */
385 if(host->mmc->hold_reg_flag)
386 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
388 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
392 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
394 dw_mci_start_command(host, data->stop, host->stop_cmdr);
397 /* DMA interface functions */
/* Abort an in-flight DMA transfer (skipping the terminate on rk3036/rk312x,
 * where stopping edma can trigger an unwanted flush) and mark the transfer
 * complete so the state machine can advance.
 * NOTE(review): the braces and the PIO else-branch are missing from this
 * listing. */
398 static void dw_mci_stop_dma(struct dw_mci *host)
400 if (host->using_dma) {
401 /* Fixme: No need to terminate edma, may cause flush op */
402 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
403 host->dma_ops->stop(host);
404 host->dma_ops->cleanup(host);
407 /* Data transfer was stopped by the interrupt handler */
408 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
411 static int dw_mci_get_dma_dir(struct mmc_data *data)
413 if (data->flags & MMC_DATA_WRITE)
414 return DMA_TO_DEVICE;
416 return DMA_FROM_DEVICE;
419 #ifdef CONFIG_MMC_DW_IDMAC
/* Unmap the scatterlist after an IDMAC transfer, unless the mapping is
 * owned by the pre_req/post_req path (host_cookie != 0).
 * NOTE(review): the data NULL-check, the sg/sg_len arguments of
 * dma_unmap_sg, and the braces are missing from this listing. */
420 static void dw_mci_dma_cleanup(struct dw_mci *host)
422 struct mmc_data *data = host->data;
425 if (!data->host_cookie)
426 dma_unmap_sg(host->dev,
429 dw_mci_get_dma_dir(data));
432 static void dw_mci_idmac_reset(struct dw_mci *host)
434 u32 bmod = mci_readl(host, BMOD);
435 /* Software reset of DMA */
436 bmod |= SDMMC_IDMAC_SWRESET;
437 mci_writel(host, BMOD, bmod);
440 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
444 /* Disable and reset the IDMAC interface */
445 temp = mci_readl(host, CTRL);
446 temp &= ~SDMMC_CTRL_USE_IDMAC;
447 temp |= SDMMC_CTRL_DMA_RESET;
448 mci_writel(host, CTRL, temp);
450 /* Stop the IDMAC running */
451 temp = mci_readl(host, BMOD);
452 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
453 temp |= SDMMC_IDMAC_SWRESET;
454 mci_writel(host, BMOD, temp);
/* IDMAC completion callback: clean up the mapping, flag the transfer
 * complete, and kick the driver tasklet to run the state machine.
 * NOTE(review): the "if (data)" guard described by the comment below and the
 * braces are missing from this listing. */
457 static void dw_mci_idmac_complete_dma(void *arg)
459 struct dw_mci *host = arg;
460 struct mmc_data *data = host->data;
462 dev_vdbg(host->dev, "DMA complete\n");
465 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
466 host->mrq->cmd->opcode,host->mrq->cmd->arg,
467 data->blocks,data->blksz,mmc_hostname(host->mmc));
470 host->dma_ops->cleanup(host);
473 * If the card was removed, data will be NULL. No point in trying to
474 * send the stop command or waiting for NBUSY in this case.
477 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
478 tasklet_schedule(&host->tasklet);
/* Fill the IDMAC descriptor ring from the mapped scatterlist: one chained
 * descriptor per sg entry, then mark the first (FD) and last (LD, interrupts
 * enabled) descriptors.
 * NOTE(review): the sg_len parameter line, "int i;", and loop braces are
 * missing from this listing. */
482 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
486 struct idmac_desc *desc = host->sg_cpu;
488 for (i = 0; i < sg_len; i++, desc++) {
489 unsigned int length = sg_dma_len(&data->sg[i]);
490 u32 mem_addr = sg_dma_address(&data->sg[i]);
492 /* Set the OWN bit and disable interrupts for this descriptor */
493 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
496 IDMAC_SET_BUFFER1_SIZE(desc, length);
498 /* Physical address to DMA to/from */
499 desc->des2 = mem_addr;
502 /* Set first descriptor */
504 desc->des0 |= IDMAC_DES0_FD;
506 /* Set last descriptor */
/* Byte-offset arithmetic on the void* ring base to reach entry i-1
 * (the last one written by the loop above). */
507 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
508 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
509 desc->des0 |= IDMAC_DES0_LD;
/* Kick off an IDMAC transfer: build descriptors, route the controller to the
 * internal DMA, enable the IDMAC, and poke the poll-demand register.
 * NOTE(review): the braces, the "u32 temp;" declaration, and three lines
 * between the CTRL and BMOD writes (likely a memory barrier) are missing
 * from this listing. */
514 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
518 dw_mci_translate_sglist(host, host->data, sg_len);
520 /* Select IDMAC interface */
521 temp = mci_readl(host, CTRL);
522 temp |= SDMMC_CTRL_USE_IDMAC;
523 mci_writel(host, CTRL, temp);
527 /* Enable the IDMAC */
528 temp = mci_readl(host, BMOD);
529 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
530 mci_writel(host, BMOD, temp);
532 /* Start it running */
/* Writing PLDMND makes the IDMAC re-read the descriptor list. */
533 mci_writel(host, PLDMND, 1);
/* One-time IDMAC setup: link the descriptor ring (forward chain closed back
 * to the base), reset the engine, unmask only TX/RX-complete interrupts, and
 * program the descriptor base address.
 * NOTE(review): "int i;", the braces, the remainder of the IDINTEN mask
 * expression, and the final "return 0;" are missing from this listing. */
536 static int dw_mci_idmac_init(struct dw_mci *host)
538 struct idmac_desc *p;
541 /* Number of descriptors in the ring buffer */
542 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
544 /* Forward link the descriptor list */
545 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
546 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
548 /* Set the last descriptor as the end-of-ring descriptor */
549 p->des3 = host->sg_dma;
550 p->des0 = IDMAC_DES0_ER;
552 dw_mci_idmac_reset(host);
554 /* Mask out interrupts - get Tx & Rx complete only */
555 mci_writel(host, IDSTS, IDMAC_INT_CLR);
556 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
559 /* Set the descriptor base address */
560 mci_writel(host, DBADDR, host->sg_dma);
/* DMA ops vtable for the internal DMA controller (IDMAC) backend.
 * NOTE(review): the closing "};" is missing from this listing. */
564 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
565 .init = dw_mci_idmac_init,
566 .start = dw_mci_idmac_start_dma,
567 .stop = dw_mci_idmac_stop_dma,
568 .complete = dw_mci_idmac_complete_dma,
569 .cleanup = dw_mci_dma_cleanup,
/* Unmap the scatterlist after an external-DMA transfer, unless the mapping
 * is owned by the pre_req/post_req path (host_cookie != 0).
 * NOTE(review): the data NULL-check and braces are missing from this
 * listing. */
573 static void dw_mci_edma_cleanup(struct dw_mci *host)
575 struct mmc_data *data = host->data;
578 if (!data->host_cookie)
579 dma_unmap_sg(host->dev,
580 data->sg, data->sg_len,
581 dw_mci_get_dma_dir(data));
584 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
586 dmaengine_terminate_all(host->dms->ch);
/* External-DMA completion callback: sync the CPU cache for reads, clean up
 * the mapping, flag the transfer complete, and kick the driver tasklet.
 * NOTE(review): the data NULL-guard described by the comment below and the
 * braces are missing from this listing. */
589 static void dw_mci_edmac_complete_dma(void *arg)
591 struct dw_mci *host = arg;
592 struct mmc_data *data = host->data;
594 dev_vdbg(host->dev, "DMA complete\n");
597 if(data->flags & MMC_DATA_READ)
598 /* Invalidate cache after read */
599 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
600 data->sg_len, DMA_FROM_DEVICE);
602 host->dma_ops->cleanup(host);
605 * If the card was removed, data will be NULL. No point in trying to
606 * send the stop command or waiting for NBUSY in this case.
609 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
610 tasklet_schedule(&host->tasklet);
/* Start a transfer on the external dmaengine channel: configure slave
 * parameters (address, width, burst), clamp the FIFO MSIZE to the edmac
 * burst limit, then prep/submit/issue a slave_sg descriptor for the write
 * or read direction with dw_mci_edmac_complete_dma as completion callback.
 * NOTE(review): several lines are missing from this listing — "int ret;",
 * the burst_limit assignments, error-path returns after dev_err, and the
 * closing braces of each branch. */
614 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
616 struct dma_slave_config slave_config;
617 struct dma_async_tx_descriptor *desc = NULL;
618 struct scatterlist *sgl = host->data->sg;
619 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
620 u32 sg_elems = host->data->sg_len;
621 u32 fifoth_val, mburst;
623 u32 idx, rx_wmark, tx_wmark;
626 /* Set external dma config: burst size, burst width*/
/* Data FIFO lives at phy_regs + data_offset; same address both ways. */
627 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
628 slave_config.src_addr = slave_config.dst_addr;
629 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
630 slave_config.src_addr_width = slave_config.dst_addr_width;
632 /* Match FIFO dma burst MSIZE with external dma config*/
633 fifoth_val = mci_readl(host, FIFOTH);
634 mburst = mszs[(fifoth_val >> 28) & 0x7];
636 /* edmac limit burst to 16, but work around for rk3036 to 8 */
637 if (unlikely(cpu_is_rk3036()))
/* If the FIFO MSIZE exceeds the edmac limit, shrink both and
 * reprogram FIFOTH accordingly. */
642 if (mburst > burst_limit) {
643 mburst = burst_limit;
644 idx = (ilog2(mburst) > 0) ? (ilog2(mburst) - 1) : 0;
646 rx_wmark = mszs[idx] - 1;
647 tx_wmark = (host->fifo_depth) / 2;
648 fifoth_val = SDMMC_SET_FIFOTH(idx, rx_wmark, tx_wmark);
650 mci_writel(host, FIFOTH, fifoth_val);
653 slave_config.dst_maxburst = mburst;
654 slave_config.src_maxburst = slave_config.dst_maxburst;
/* Write path: memory -> device. */
656 if(host->data->flags & MMC_DATA_WRITE){
657 slave_config.direction = DMA_MEM_TO_DEV;
658 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
660 dev_err(host->dev, "error in dw_mci edma configuration.\n");
664 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
665 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
667 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
670 /* Set dw_mci_edmac_complete_dma as callback */
671 desc->callback = dw_mci_edmac_complete_dma;
672 desc->callback_param = (void *)host;
673 dmaengine_submit(desc);
675 /* Flush cache before write */
676 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
677 sg_elems, DMA_TO_DEVICE);
678 dma_async_issue_pending(host->dms->ch);
/* Read path: device -> memory. */
681 slave_config.direction = DMA_DEV_TO_MEM;
682 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
684 dev_err(host->dev, "error in dw_mci edma configuration.\n");
687 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
688 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
690 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
693 /* set dw_mci_edmac_complete_dma as callback */
694 desc->callback = dw_mci_edmac_complete_dma;
695 desc->callback_param = (void *)host;
696 dmaengine_submit(desc);
697 dma_async_issue_pending(host->dms->ch);
/* Allocate the dw_mci_dma_slave wrapper and request the "dw_mci" dmaengine
 * slave channel named in the device tree.
 * NOTE(review): error-path returns, braces, and the success return are
 * missing from this listing.
 * BUG: the failure message below dereferences host->dms->ch->chan_id inside
 * the "if (!host->dms->ch)" branch — that is a NULL pointer dereference on
 * the very path that reports the channel is missing. */
701 static int dw_mci_edmac_init(struct dw_mci *host)
703 /* Request external dma channel, SHOULD decide chn in dts */
705 host->dms = (struct dw_mci_dma_slave *)kmalloc
706 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
707 if (NULL == host->dms) {
708 dev_err(host->dev, "No enough memory to alloc dms.\n");
712 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
713 if (!host->dms->ch) {
714 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
715 host->dms->ch->chan_id);
/* Presumably the error path frees host->dms here — lines missing. */
722 if (NULL != host->dms) {
730 static void dw_mci_edmac_exit(struct dw_mci *host)
732 if (NULL != host->dms) {
733 if (NULL != host->dms->ch) {
734 dma_release_channel(host->dms->ch);
735 host->dms->ch = NULL;
/* DMA ops vtable for the external dmaengine (edmac) backend.
 * NOTE(review): the closing "};" is missing from this listing. */
742 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
743 .init = dw_mci_edmac_init,
744 .exit = dw_mci_edmac_exit,
745 .start = dw_mci_edmac_start_dma,
746 .stop = dw_mci_edmac_stop_dma,
747 .complete = dw_mci_edmac_complete_dma,
748 .cleanup = dw_mci_edma_cleanup,
750 #endif /* CONFIG_MMC_DW_IDMAC */
/* Map the request's scatterlist for DMA, rejecting transfers that are too
 * short or not word-aligned. When called from pre_req (next != 0) the result
 * is cached in data->host_cookie for reuse at submit time.
 * NOTE(review): the "bool next" parameter line, the -EINVAL returns, the
 * dma_map_sg sg/sg_len arguments, and "return sg_len;" are missing from this
 * listing. */
752 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
753 struct mmc_data *data,
756 struct scatterlist *sg;
757 unsigned int i, sg_len;
759 if (!next && data->host_cookie)
760 return data->host_cookie;
763 * We don't do DMA on "complex" transfers, i.e. with
764 * non-word-aligned buffers or lengths. Also, we don't bother
765 * with all the DMA setup overhead for short transfers.
767 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
/* Every sg entry must be 4-byte aligned in offset and length. */
773 for_each_sg(data->sg, sg, data->sg_len, i) {
774 if (sg->offset & 3 || sg->length & 3)
778 sg_len = dma_map_sg(host->dev,
781 dw_mci_get_dma_dir(data));
/* Cache the mapping for the async pre_req/post_req protocol. */
786 data->host_cookie = sg_len;
/* mmc_host_ops.pre_req: pre-map the next request's data for DMA so the
 * mapping overlaps with the current transfer. A failed mapping simply clears
 * host_cookie and falls back to mapping at submit time.
 * NOTE(review): the "bool is_first_req" parameter line, early returns, and
 * braces are missing from this listing. */
791 static void dw_mci_pre_req(struct mmc_host *mmc,
792 struct mmc_request *mrq,
795 struct dw_mci_slot *slot = mmc_priv(mmc);
796 struct mmc_data *data = mrq->data;
798 if (!slot->host->use_dma || !data)
/* A stale cookie would alias an old mapping — reset it first. */
801 if (data->host_cookie) {
802 data->host_cookie = 0;
806 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
807 data->host_cookie = 0;
/* mmc_host_ops.post_req: release the DMA mapping created by pre_req (only
 * when host_cookie shows we own one) and clear the cookie.
 * NOTE(review): the "int err" parameter line, the early return, and the
 * dma_unmap_sg sg/sg_len arguments are missing from this listing. */
810 static void dw_mci_post_req(struct mmc_host *mmc,
811 struct mmc_request *mrq,
814 struct dw_mci_slot *slot = mmc_priv(mmc);
815 struct mmc_data *data = mrq->data;
817 if (!slot->host->use_dma || !data)
820 if (data->host_cookie)
821 dma_unmap_sg(slot->host->dev,
824 dw_mci_get_dma_dir(data));
825 data->host_cookie = 0;
/* Choose the largest burst MSIZE that evenly divides both the block depth
 * and the TX-watermark remainder, then program FIFOTH for the coming DMA
 * transfer. Compiled only for the IDMAC backend.
 * NOTE(review): the search loop around the divisibility test, the msize
 * assignment, "goto done"-style exits, and braces are missing from this
 * listing. */
828 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
830 #ifdef CONFIG_MMC_DW_IDMAC
831 unsigned int blksz = data->blksz;
832 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
833 u32 fifo_width = 1 << host->data_shift;
834 u32 blksz_depth = blksz / fifo_width, fifoth_val;
835 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
/* Start the search from the largest candidate MSIZE. */
836 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
838 tx_wmark = (host->fifo_depth) / 2;
839 tx_wmark_invers = host->fifo_depth - tx_wmark;
843 * if blksz is not a multiple of the FIFO width
845 if (blksz % fifo_width) {
852 if (!((blksz_depth % mszs[idx]) ||
853 (tx_wmark_invers % mszs[idx]))) {
855 rx_wmark = mszs[idx] - 1;
860 * If idx is '0', it won't be tried
861 * Thus, initial values are uesed
864 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
865 mci_writel(host, FIFOTH, fifoth_val);
/* Enable the card-read-threshold feature for HS200/SDR104 reads (the FIFO
 * must hold one full block before the controller starts clocking data);
 * disable it otherwise.
 * NOTE(review): "u32 thld_size;", the "goto disable" paths, labels, and
 * braces are missing from this listing. */
870 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
872 unsigned int blksz = data->blksz;
873 u32 blksz_depth, fifo_depth;
876 WARN_ON(!(data->flags & MMC_DATA_READ));
/* Threshold is only useful for the fastest read timings. */
878 if (host->timing != MMC_TIMING_MMC_HS200 &&
879 host->timing != MMC_TIMING_UHS_SDR104)
882 blksz_depth = blksz / (1 << host->data_shift);
883 fifo_depth = host->fifo_depth;
885 if (blksz_depth > fifo_depth)
889 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
890 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
891 * Currently just choose blksz.
894 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
/* Disable path: zero threshold, enable bit cleared. */
898 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/* Try to submit the data phase via DMA: map the sg list, adjust FIFOTH if
 * the block size changed, enable the DMA interface, mask the PIO RX/TX IRQs,
 * and start the backend. Returns non-zero (to the caller's PIO fallback)
 * when DMA cannot be used.
 * NOTE(review): local declarations (sg_len, temp, flags), the no-channel and
 * sg_len<0 early returns, and the final "return 0;" are missing from this
 * listing. */
901 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
909 /* If we don't have a channel, we can't do DMA */
913 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
915 /* Fixme: No need terminate edma, may cause flush op */
916 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
917 host->dma_ops->stop(host);
924 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
925 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
929 * Decide the MSIZE and RX/TX Watermark.
930 * If current block size is same with previous size,
931 * no need to update fifoth.
933 if (host->prev_blksz != data->blksz)
934 dw_mci_adjust_fifoth(host, data);
937 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
939 /* Enable the DMA interface */
940 temp = mci_readl(host, CTRL);
941 temp |= SDMMC_CTRL_DMA_ENABLE;
942 mci_writel(host, CTRL, temp);
944 /* Disable RX/TX IRQs, let DMA handle it */
945 spin_lock_irqsave(&host->slock, flags);
946 temp = mci_readl(host, INTMASK);
947 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
948 mci_writel(host, INTMASK, temp);
949 spin_unlock_irqrestore(&host->slock, flags);
951 host->dma_ops->start(host, sg_len);
/* Set up the data phase of a request: record direction, program the read
 * threshold, and try DMA; on DMA refusal fall back to PIO (sg_miter, RX/TX
 * IRQs unmasked, DMA disabled, FIFOTH restored to the PIO default).
 * NOTE(review): local declarations (temp, flag), host->sg/ host->data
 * assignments, and several braces are missing from this listing. */
956 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
961 data->error = -EINPROGRESS;
963 //WARN_ON(host->data);
968 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
970 if (data->flags & MMC_DATA_READ) {
971 host->dir_status = DW_MCI_RECV_STATUS;
972 dw_mci_ctrl_rd_thld(host, data);
/* else: write direction. */
974 host->dir_status = DW_MCI_SEND_STATUS;
977 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
978 data->blocks, data->blksz, mmc_hostname(host->mmc));
/* Non-zero return means DMA was refused — do PIO instead. */
980 if (dw_mci_submit_data_dma(host, data)) {
981 int flags = SG_MITER_ATOMIC;
982 if (host->data->flags & MMC_DATA_READ)
983 flags |= SG_MITER_TO_SG;
985 flags |= SG_MITER_FROM_SG;
987 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
989 host->part_buf_start = 0;
990 host->part_buf_count = 0;
/* Re-enable the RX/TX ready IRQs that the DMA path masks. */
992 spin_lock_irqsave(&host->slock, flag);
993 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
994 temp = mci_readl(host, INTMASK);
995 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
996 mci_writel(host, INTMASK, temp);
997 spin_unlock_irqrestore(&host->slock, flag);
999 temp = mci_readl(host, CTRL);
1000 temp &= ~SDMMC_CTRL_DMA_ENABLE;
1001 mci_writel(host, CTRL, temp);
1004 * Use the initial fifoth_val for PIO mode.
1005 * If next issued data may be transfered by DMA mode,
1006 * prev_blksz should be invalidated.
1008 mci_writel(host, FIFOTH, host->fifoth_val);
1009 host->prev_blksz = 0;
1012 * Keep the current block size.
1013 * It will be used to decide whether to update
1014 * fifoth register next time.
1016 host->prev_blksz = data->blksz;
/* Issue an internal controller command (e.g. clock update) outside the
 * normal request path: wait for the card/controller to go un-busy, write
 * CMDARG/CMD, then poll until hardware clears the START bit or a timeout
 * expires (50 ms for clock updates, 500 ms otherwise).
 * NOTE(review): "int ret;", the do/while around the busy poll, break/return
 * statements, and braces are missing from this listing. */
1020 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1022 struct dw_mci *host = slot->host;
1023 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1024 unsigned int cmd_status = 0;
1025 #ifdef SDMMC_WAIT_FOR_UNBUSY
1027 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
/* Only wait for un-busy while a card is actually present. */
1029 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1031 ret = time_before(jiffies, timeout);
1032 cmd_status = mci_readl(host, STATUS);
1033 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1037 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1038 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1041 mci_writel(host, CMDARG, arg);
1043 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
/* Clock-update commands complete quickly; use a shorter poll window. */
1044 if(cmd & SDMMC_CMD_UPD_CLK)
1045 timeout = jiffies + msecs_to_jiffies(50);
1047 timeout = jiffies + msecs_to_jiffies(500);
1048 while (time_before(jiffies, timeout)) {
1049 cmd_status = mci_readl(host, CMD);
1050 if (!(cmd_status & SDMMC_CMD_START))
1053 dev_err(&slot->mmc->class_dev,
1054 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1055 cmd, arg, cmd_status);
/* Program the card clock (CLKDIV/CLKENA/CLKSRC plus clk_mmc rate workarounds
 * for Rockchip SoCs), enable low-power clock gating when SDIO interrupts are
 * not in use, and set the bus width in CTYPE. A clock of 0 gates the clock
 * entirely (skipping the update command mid-voltage-switch).
 * NOTE(review): this listing is missing many lines — "u32 div, clk_en_a,
 * sdio_int;" declarations, several if/else braces, the div += 1 statement,
 * and the closing "*/" of the long BUG comment starting at original line
 * 1134 (so no annotations are added past that point). */
1058 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1060 struct dw_mci *host = slot->host;
1061 unsigned int tempck,clock = slot->clock;
1066 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1067 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
/* clock == 0: gate the card clock. During a signal-voltage switch
 * (svi_flags != 0) the clock-update command is skipped. */
1070 mci_writel(host, CLKENA, 0);
1071 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1072 if(host->svi_flags == 0)
1073 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1075 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1077 } else if (clock != host->current_speed || force_clkinit) {
1078 div = host->bus_hz / clock;
1079 if (host->bus_hz % clock && host->bus_hz > clock)
1081 * move the + 1 after the divide to prevent
1082 * over-clocking the card.
/* CLKDIV divides by 2*div, so halve (rounding up) here. */
1086 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1088 if ((clock << div) != slot->__clk_old || force_clkinit) {
1089 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1090 dev_info(&slot->mmc->class_dev,
1091 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1092 slot->id, host->bus_hz, clock,
1095 host->set_speed = tempck;
1096 host->set_div = div;
/* disable clock before reprogramming the divider */
1100 mci_writel(host, CLKENA, 0);
1101 mci_writel(host, CLKSRC, 0);
1105 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
/* Identification-phase rates are generated by re-parenting
 * clk_mmc to the 24 MHz xtal rather than by the divider. */
1107 if(clock <= 400*1000){
1108 MMC_DBG_BOOT_FUNC(host->mmc,
1109 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1110 clock * 2, mmc_hostname(host->mmc));
1111 /* clk_mmc will change parents to 24MHz xtal*/
1112 clk_set_rate(host->clk_mmc, clock * 2);
1115 host->set_div = div;
1119 MMC_DBG_BOOT_FUNC(host->mmc,
1120 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1121 mmc_hostname(host->mmc));
1124 MMC_DBG_ERR_FUNC(host->mmc,
1125 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1126 mmc_hostname(host->mmc));
1128 host->set_div = div;
1129 host->bus_hz = host->set_speed * 2;
1130 MMC_DBG_BOOT_FUNC(host->mmc,
1131 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1132 div, host->bus_hz, mmc_hostname(host->mmc));
1134 /* BUG may be here, come on, Linux BSP engineer looks!
1135 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1136 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1137 some oops happened like that:
1138 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1139 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1140 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1141 mmc0: new high speed DDR MMC card at address 0001
1142 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1144 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1145 mmcblk0: retrying using single block read
1146 mmcblk0: error -110 sending status command, retrying
1148 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1151 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1152 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1155 host->set_div = div;
1156 host->bus_hz = host->set_speed * 2;
1157 MMC_DBG_BOOT_FUNC(host->mmc,
1158 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1159 div, host->bus_hz, mmc_hostname(host->mmc));
1162 if (host->verid < DW_MMC_240A)
1163 clk_set_rate(host->clk_mmc,(host->bus_hz));
1165 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1171 /* set clock to desired speed */
1172 mci_writel(host, CLKDIV, div);
1176 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1178 /* enable clock; only low power if no SDIO */
1179 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1181 if (host->verid < DW_MMC_240A)
1182 sdio_int = SDMMC_INT_SDIO(slot->id);
1184 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1186 if (!(mci_readl(host, INTMASK) & sdio_int))
1187 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1188 mci_writel(host, CLKENA, clk_en_a);
1192 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1193 /* keep the clock with reflecting clock dividor */
1194 slot->__clk_old = clock << div;
1197 host->current_speed = clock;
1199 if(slot->ctype != slot->pre_ctype)
1200 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1202 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1203 mmc_hostname(host->mmc));
1204 slot->pre_ctype = slot->ctype;
1206 /* Set the current slot bus width */
1207 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1210 extern struct mmc_card *this_card;
/* Poll the STATUS register until both DATA_BUSY and MC_BUSY clear or a
 * card-type-dependent timeout expires. eMMC erase commands get an extended
 * timeout derived from the card's EXT_CSD erase parameters.
 * NOTE(review): "u32 se_flag;" and several branch/brace lines are missing
 * from this listing. */
1211 static void dw_mci_wait_unbusy(struct dw_mci *host)
1214 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1215 unsigned long time_loop;
1216 unsigned int status;
1219 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1221 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1222 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1223 /* Special care for (secure)erase timeout calculation */
/* BUG: (arg & (0x1 << 31)) evaluates to 0 or 0x80000000, never 1,
 * so this comparison is always false and the secure-erase flag can
 * never be set. Should be "!= 0" (or test arg >> 31). */
1225 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1228 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1229 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1230 300000 * (this_card->ext_csd.sec_erase_mult)) :
1231 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
/* Never let an eMMC wait drop below the eMMC floor. */
1235 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1236 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1237 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1238 timeout = SDMMC_DATA_TIMEOUT_SD;
1241 time_loop = jiffies + msecs_to_jiffies(timeout);
1243 status = mci_readl(host, STATUS);
1244 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1246 } while (time_before(jiffies, time_loop));
1251 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1254 * 0--status is busy.
1255 * 1--status is unbusy.
/* mmc_host_ops.card_busy used during the CMD11 signal-voltage switch: the
 * svi_flags toggle tracks which phase of the two-step switch handshake the
 * host is in, and the return value reports busy (0) / not busy (1).
 * NOTE(review): the surrounding braces and the condition guarding the flag
 * flip are missing from this listing. */
1257 int dw_mci_card_busy(struct mmc_host *mmc)
1259 struct dw_mci_slot *slot = mmc_priv(mmc);
1260 struct dw_mci *host = slot->host;
1262 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1263 host->svi_flags, mmc_hostname(host->mmc));
1266 if(host->svi_flags == 0){
1268 host->svi_flags = 1;
1269 return host->svi_flags;
1272 host->svi_flags = 0;
1273 return host->svi_flags;
/*
 * __dw_mci_start_request() - program the controller for one command.
 * Selects the slot, waits for the controller to go idle, clears event
 * bookkeeping, programs byte/block counts for data transfers, submits
 * the data (DMA/PIO), issues the command, and pre-computes stop_cmdr
 * for a later CMD12 if the request carries a stop command.
 */
1279 static void __dw_mci_start_request(struct dw_mci *host,
1280 struct dw_mci_slot *slot,
1281 struct mmc_command *cmd)
1283 struct mmc_request *mrq;
1284 struct mmc_data *data;
1288 if (host->pdata->select_slot)
1289 host->pdata->select_slot(slot->id);
1291 host->cur_slot = slot;
/* Controller must be idle before a new command is written. */
1294 dw_mci_wait_unbusy(host);
1296 host->pending_events = 0;
1297 host->completed_events = 0;
1298 host->data_status = 0;
1302 dw_mci_set_timeout(host);
1303 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1304 mci_writel(host, BLKSIZ, data->blksz);
1307 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1309 /* this is the first command, send the initialization clock */
1310 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1311 cmdflags |= SDMMC_CMD_INIT;
1314 dw_mci_submit_data(host, data);
1318 dw_mci_start_command(host, cmd, cmdflags);
/* Pre-build CMD12 flags so the stop can be issued from IRQ context. */
1321 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * dw_mci_start_request() - kick off the slot's queued mmc_request.
 * If the request has a set-block-count (CMD23) sbc, that is sent first;
 * otherwise the main command goes straight out.
 */
1324 static void dw_mci_start_request(struct dw_mci *host,
1325 struct dw_mci_slot *slot)
1327 struct mmc_request *mrq = slot->mrq;
1328 struct mmc_command *cmd;
1330 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1331 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1333 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1334 __dw_mci_start_request(host, slot, cmd);
1337 /* must be called with host->lock held */
/*
 * dw_mci_queue_request() - start the request now if the host is idle,
 * otherwise append the slot to host->queue for dw_mci_request_end()
 * to pick up later.
 */
1338 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1339 struct mmc_request *mrq)
1341 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1346 if (host->state == STATE_IDLE) {
1347 host->state = STATE_SENDING_CMD;
1348 dw_mci_start_request(host, slot);
1350 list_add_tail(&slot->queue_node, &host->queue);
/*
 * dw_mci_request() - mmc_host_ops.request entry point.
 * Fails fast with -ENOMEDIUM if no card is present; the presence check
 * and the queueing are done under host->lock so a removal between them
 * cannot slip through (see comment below).
 */
1354 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1356 struct dw_mci_slot *slot = mmc_priv(mmc);
1357 struct dw_mci *host = slot->host;
1362 * The check for card presence and queueing of the request must be
1363 * atomic, otherwise the card could be removed in between and the
1364 * request wouldn't fail until another card was inserted.
1366 spin_lock_bh(&host->lock);
1368 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1369 spin_unlock_bh(&host->lock);
1370 mrq->cmd->error = -ENOMEDIUM;
1371 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1372 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
1374 mmc_request_done(mmc, mrq);
1378 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1379 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1381 dw_mci_queue_request(host, slot, mrq);
1383 spin_unlock_bh(&host->lock);
/*
 * dw_mci_set_ios() - mmc_host_ops.set_ios: apply bus width, timing,
 * clock and power-mode changes requested by the MMC core.
 * Waits (bounded) for the controller to go unbusy before reprogramming;
 * during a voltage switch (svi_flags == 1) a longer wait is allowed.
 */
1386 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1388 struct dw_mci_slot *slot = mmc_priv(mmc);
1389 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1390 struct dw_mci *host = slot->host;
1392 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1395 #ifdef SDMMC_WAIT_FOR_UNBUSY
1396 unsigned long time_loop;
1399 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1400 if(host->svi_flags == 1)
1401 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD)
1403 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1405 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1408 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1409 printk("%d..%s: no card. [%s]\n", \
1410 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until data/MC busy clear or the deadline passes. */
1415 ret = time_before(jiffies, time_loop);
1416 regs = mci_readl(slot->host, STATUS);
1417 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1423 printk("slot->flags = %lu ", slot->flags);
1424 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1425 if(host->svi_flags != 1)
1428 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1429 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Translate the core's bus-width request into the CTYPE encoding. */
1433 switch (ios->bus_width) {
1434 case MMC_BUS_WIDTH_4:
1435 slot->ctype = SDMMC_CTYPE_4BIT;
1437 case MMC_BUS_WIDTH_8:
1438 slot->ctype = SDMMC_CTYPE_8BIT;
1441 /* set default 1 bit mode */
1442 slot->ctype = SDMMC_CTYPE_1BIT;
1443 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR50 timing sets the per-slot DDR bit (bit 16+id) in UHS_REG. */
1446 regs = mci_readl(slot->host, UHS_REG);
1449 if (ios->timing == MMC_TIMING_UHS_DDR50)
1450 regs |= ((0x1 << slot->id) << 16);
1452 regs &= ~((0x1 << slot->id) << 16);
1454 mci_writel(slot->host, UHS_REG, regs);
1455 slot->host->timing = ios->timing;
1458 * Use mirror of ios->clock to prevent race with mmc
1459 * core ios update when finding the minimum.
1461 slot->clock = ios->clock;
1463 if (drv_data && drv_data->set_ios)
1464 drv_data->set_ios(slot->host, ios);
1466 /* Slot specific timing and width adjustment */
1467 dw_mci_setup_bus(slot, false);
1471 switch (ios->power_mode) {
1473 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1475 if (slot->host->pdata->setpower)
1476 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1477 regs = mci_readl(slot->host, PWREN);
1478 regs |= (1 << slot->id);
1479 mci_writel(slot->host, PWREN, regs);
1482 /* Power down slot */
1483 if(slot->host->pdata->setpower)
1484 slot->host->pdata->setpower(slot->id, 0);
1485 regs = mci_readl(slot->host, PWREN);
1486 regs &= ~(1 << slot->id);
1487 mci_writel(slot->host, PWREN, regs);
/*
 * dw_mci_get_ro() - mmc_host_ops.get_ro: report write-protect state.
 * Priority: slot quirk (never protected) > platform get_ro() callback >
 * dedicated WP GPIO > controller WRTPRT register bit for this slot.
 * Returns non-zero when the card is read-only.
 */
1494 static int dw_mci_get_ro(struct mmc_host *mmc)
1497 struct dw_mci_slot *slot = mmc_priv(mmc);
1498 struct dw_mci_board *brd = slot->host->pdata;
1500 /* Use platform get_ro function, else try on board write protect */
1501 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1503 else if(brd->get_ro)
1504 read_only = brd->get_ro(slot->id);
1505 else if(gpio_is_valid(slot->wp_gpio))
1506 read_only = gpio_get_value(slot->wp_gpio);
1509 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1511 dev_dbg(&mmc->class_dev, "card is %s\n",
1512 read_only ? "read-only" : "read-write");
/*
 * dw_mci_set_sdio_status() - virtual card-detect for SDIO slots (used by
 * the WiFi rfkill path).  Updates DW_MMC_CARD_PRESENT under host->lock,
 * gates the controller clocks accordingly (enable on insert, disable on
 * removal), then schedules a card-detect rescan.
 */
1517 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1519 struct dw_mci_slot *slot = mmc_priv(mmc);
1520 struct dw_mci *host = slot->host;
1521 /*struct dw_mci_board *brd = slot->host->pdata;*/
1523 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1526 spin_lock_bh(&host->lock);
1529 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1531 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1533 spin_unlock_bh(&host->lock);
/* Clock gating: only toggle clocks whose state actually changes. */
1535 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1536 if(__clk_is_enabled(host->hclk_mmc) == false)
1537 clk_prepare_enable(host->hclk_mmc);
1538 if(__clk_is_enabled(host->clk_mmc) == false)
1539 clk_prepare_enable(host->clk_mmc);
1541 if(__clk_is_enabled(host->clk_mmc) == true)
1542 clk_disable_unprepare(slot->host->clk_mmc);
1543 if(__clk_is_enabled(host->hclk_mmc) == true)
1544 clk_disable_unprepare(slot->host->hclk_mmc);
/* 20 ms debounce before the core rescans the bus. */
1547 mmc_detect_change(slot->mmc, 20);
/*
 * dw_mci_get_cd() - mmc_host_ops.get_cd: report card presence.
 * On RK3036/RK3126(b) SD slots the card-detect pin is shared with the
 * force_jtag function, so presence is derived from the CD GPIO level,
 * the IRQ trigger polarity is flipped for the next edge, and the GRF
 * force_jtag mux is enabled/disabled to match.  Otherwise presence falls
 * back to: SDIO virtual status bit > broken-CD quirk > platform get_cd()
 * > CD GPIO > controller CDETECT register.
 */
1553 static int dw_mci_get_cd(struct mmc_host *mmc)
1556 struct dw_mci_slot *slot = mmc_priv(mmc);
1557 struct dw_mci_board *brd = slot->host->pdata;
1558 struct dw_mci *host = slot->host;
1559 int gpio_cd = mmc_gpio_get_cd(mmc);
1560 int force_jtag_bit, force_jtag_reg;
1564 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
1565 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1566 gpio_cd = slot->cd_gpio;
1567 irq = gpio_to_irq(gpio_cd);
1568 if (gpio_is_valid(gpio_cd)) {
1569 gpio_val = gpio_get_value(gpio_cd);
1570 if (soc_is_rk3036()) {
1571 force_jtag_bit = 11;
1572 force_jtag_reg = RK312X_GRF_SOC_CON0;
1573 } else if (soc_is_rk3126() || soc_is_rk3126b()) {
1574 force_jtag_reg = RK312X_GRF_SOC_CON0;
/* Debounced read: only act if the level is stable across two samples. */
1578 if (gpio_val == gpio_get_value(gpio_cd)) {
1579 gpio_cd = gpio_get_value(gpio_cd) == 0 ? 1 : 0;
1581 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1582 /* Enable force_jtag without card in slot, ONLY for NCD-package */
1583 grf_writel((0x1 << (force_jtag_bit + 16)) | (1 << force_jtag_bit),
1586 dw_mci_ctrl_all_reset(host);
1588 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
1589 /* Really card detected: SHOULD disable force_jtag */
1590 grf_writel((0x1 << (force_jtag_bit + 16)) | (0 << force_jtag_bit),
/* Unstable level: re-arm the IRQ edge and keep the last known state. */
1595 gpio_val = gpio_get_value(gpio_cd);
1597 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
1598 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1599 return slot->last_detect_state;
1602 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
1606 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1607 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1609 /* Use platform get_cd function, else try onboard card detect */
1610 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1612 else if (brd->get_cd)
1613 present = !brd->get_cd(slot->id);
1614 else if (!IS_ERR_VALUE(gpio_cd))
1617 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1620 spin_lock_bh(&host->lock);
1622 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1623 dev_dbg(&mmc->class_dev, "card is present\n");
1625 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1626 dev_dbg(&mmc->class_dev, "card is not present\n");
1628 spin_unlock_bh(&host->lock);
1635 * Dts Should caps emmc controller with poll-hw-reset
/*
 * dw_mci_hw_reset() - mmc_host_ops.hw_reset for eMMC.
 * Sequence: (1) issue CMD12 to abort any in-flight transfer, (2) wait for
 * (or force-clear) the data-over interrupt, (3) reset IDMAC, DMA and FIFO
 * in that exact order, (4) pulse RST_n/PWREN per eMMC spec timing.
 *
 * Fix: the FIFO-reset failure warning wrongly printed
 * "SDMMC_CTRL_DMA_RESET failed" (copy-paste from the DMA-reset check);
 * it now names SDMMC_CTRL_FIFO_RESET so the log identifies the right step.
 */
1637 static void dw_mci_hw_reset(struct mmc_host *mmc)
1639 struct dw_mci_slot *slot = mmc_priv(mmc);
1640 struct dw_mci *host = slot->host;
1645 unsigned long timeout;
1648 /* (1) CMD12 to end any transfer in process */
1649 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1650 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1652 if(host->mmc->hold_reg_flag)
1653 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1654 mci_writel(host, CMDARG, 0);
1656 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* CMD start bit self-clears once the command is accepted; 500 ms cap. */
1658 timeout = jiffies + msecs_to_jiffies(500);
1660 ret = time_before(jiffies, timeout);
1661 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1666 MMC_DBG_ERR_FUNC(host->mmc,
1667 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1668 __func__, mmc_hostname(host->mmc));
1670 /* (2) wait DTO, even if no response is sent back by card */
1672 timeout = jiffies + msecs_to_jiffies(5);
1674 ret = time_before(jiffies, timeout);
1675 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1676 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1682 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1684 /* Software reset - BMOD[0] for IDMA only */
1685 regs = mci_readl(host, BMOD);
1686 regs |= SDMMC_IDMAC_SWRESET;
1687 mci_writel(host, BMOD, regs);
1688 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1689 regs = mci_readl(host, BMOD);
1690 if(regs & SDMMC_IDMAC_SWRESET)
1691 MMC_DBG_WARN_FUNC(host->mmc,
1692 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1693 __func__, mmc_hostname(host->mmc));
1695 /* DMA reset - CTRL[2] */
1696 regs = mci_readl(host, CTRL);
1697 regs |= SDMMC_CTRL_DMA_RESET;
1698 mci_writel(host, CTRL, regs);
1699 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1700 regs = mci_readl(host, CTRL);
1701 if(regs & SDMMC_CTRL_DMA_RESET)
1702 MMC_DBG_WARN_FUNC(host->mmc,
1703 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1704 __func__, mmc_hostname(host->mmc));
1706 /* FIFO reset - CTRL[1] */
1707 regs = mci_readl(host, CTRL);
1708 regs |= SDMMC_CTRL_FIFO_RESET;
1709 mci_writel(host, CTRL, regs);
1710 mdelay(1); /* no timing limited, 1ms is random value */
1711 regs = mci_readl(host, CTRL);
1712 if(regs & SDMMC_CTRL_FIFO_RESET)
1713 MMC_DBG_WARN_FUNC(host->mmc,
1714 "%s dw_mci_hw_reset: SDMMC_CTRL_FIFO_RESET failed!!! [%s]\n",
1715 __func__, mmc_hostname(host->mmc));
1718 According to eMMC spec
1719 tRstW >= 1us ; RST_n pulse width
1720 tRSCA >= 200us ; RST_n to Command time
1721 tRSTH >= 1us ; RST_n high period
1723 mci_writel(slot->host, PWREN, 0x0);
1724 mci_writel(slot->host, RST_N, 0x0);
1726 udelay(10); /* 10us for bad quality eMMc. */
1728 mci_writel(slot->host, PWREN, 0x1);
1729 mci_writel(slot->host, RST_N, 0x1);
1731 usleep_range(500, 1000); /* at least 500(> 200us) */
1735 * Disable lower power mode.
1737 * Low power mode will stop the card clock when idle. According to the
1738 * description of the CLKENA register we should disable low power mode
1739 * for SDIO cards if we need SDIO interrupts to work.
1741 * This function is fast if low power mode is already disabled.
/* Clears this slot's CLKEN_LOW_PWR bit in CLKENA, then sends the
 * clock-update command so the controller latches the new setting. */
1743 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1745 struct dw_mci *host = slot->host;
1747 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1749 clk_en_a = mci_readl(host, CLKENA);
1751 if (clk_en_a & clken_low_pwr) {
1752 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1753 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1754 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * dw_mci_enable_sdio_irq() - mmc_host_ops.enable_sdio_irq.
 * Masks/unmasks this slot's SDIO interrupt bit in INTMASK under the
 * slock spinlock.  The per-slot SDIO bit position moved in IP version
 * 2.40a, hence the verid check.  Low-power clock gating must be off
 * while SDIO IRQs are enabled, or the stopped card clock would suppress
 * interrupt delivery.
 */
1758 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1760 struct dw_mci_slot *slot = mmc_priv(mmc);
1761 struct dw_mci *host = slot->host;
1762 unsigned long flags;
1766 spin_lock_irqsave(&host->slock, flags);
1768 /* Enable/disable Slot Specific SDIO interrupt */
1769 int_mask = mci_readl(host, INTMASK);
1771 if (host->verid < DW_MMC_240A)
1772 sdio_int = SDMMC_INT_SDIO(slot->id);
1774 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1778 * Turn off low power mode if it was enabled. This is a bit of
1779 * a heavy operation and we disable / enable IRQs a lot, so
1780 * we'll leave low power mode disabled and it will get
1781 * re-enabled again in dw_mci_setup_bus().
1783 dw_mci_disable_low_power(slot);
1785 mci_writel(host, INTMASK,
1786 (int_mask | sdio_int));
1788 mci_writel(host, INTMASK,
1789 (int_mask & ~sdio_int));
1792 spin_unlock_irqrestore(&host->slock, flags);
1795 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* Supported IO-domain voltages in millivolts. */
1797 IO_DOMAIN_12 = 1200,
1798 IO_DOMAIN_18 = 1800,
1799 IO_DOMAIN_33 = 3300,
/*
 * dw_mci_do_grf_io_domain_switch() - program the SoC GRF so the SD IO
 * pads match the signalling voltage just set on the regulator.  Only the
 * RK3288 SD path is handled here; other chips log an error.
 * NOTE(review): 'voltage' written to RK3288_GRF_IO_VSEL appears to be a
 * pre-encoded field value by this point, not millivolts — confirm against
 * the callers and the RK3288 TRM GRF_IO_VSEL description.
 */
1801 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1811 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1812 __FUNCTION__, mmc_hostname(host->mmc));
1815 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1816 __FUNCTION__, mmc_hostname(host->mmc));
1820 if(cpu_is_rk3288()){
1821 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1822 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1826 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1827 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * dw_mci_do_start_signal_voltage_switch() - perform the UHS signal
 * voltage change: set the vmmc regulator, switch the SoC IO domain,
 * program UHS_REG's 1.8 V bit, and allow the mandated settle time
 * (regulator output must be stable within 5 ms).  Only meaningful on
 * IP >= 2.40a, which introduced UHS_REG.
 */
1831 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1832 struct mmc_ios *ios)
1835 unsigned int value,uhs_reg;
1838 * Signal Voltage Switching is only applicable for Host Controllers
1841 if (host->verid < DW_MMC_240A)
1844 uhs_reg = mci_readl(host, UHS_REG);
1845 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1846 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1848 switch (ios->signal_voltage) {
1849 case MMC_SIGNAL_VOLTAGE_330:
1850 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1852 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1853 /* regulator_put(host->vmmc); //to be done in remove function. */
1855 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1856 __func__, regulator_get_voltage(host->vmmc), ret);
1858 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1859 " failed\n", mmc_hostname(host->mmc));
1862 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1864 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1865 __FUNCTION__, mmc_hostname(host->mmc));
1867 /* set High-power mode */
1868 value = mci_readl(host, CLKENA);
1869 value &= ~SDMMC_CLKEN_LOW_PWR;
1870 mci_writel(host,CLKENA , value);
/* Clear the 1.8 V enable bit: pads back to 3.3 V signalling. */
1872 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1873 mci_writel(host,UHS_REG , uhs_reg);
1876 usleep_range(5000, 5500);
1878 /* 3.3V regulator output should be stable within 5 ms */
1879 uhs_reg = mci_readl(host, UHS_REG);
1880 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1883 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1884 mmc_hostname(host->mmc));
1887 case MMC_SIGNAL_VOLTAGE_180:
1889 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1890 /* regulator_put(host->vmmc);//to be done in remove function. */
1892 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1893 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1895 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1896 " failed\n", mmc_hostname(host->mmc));
1899 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1903 * Enable 1.8V Signal Enable in the Host Control2
1906 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1909 usleep_range(5000, 5500);
1910 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1911 __FUNCTION__,mmc_hostname(host->mmc));
1913 /* 1.8V regulator output should be stable within 5 ms */
1914 uhs_reg = mci_readl(host, UHS_REG);
1915 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1918 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1919 mmc_hostname(host->mmc));
1922 case MMC_SIGNAL_VOLTAGE_120:
1924 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1926 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1927 " failed\n", mmc_hostname(host->mmc));
1933 /* No signal voltage switch required */
/*
 * dw_mci_start_signal_voltage_switch() - mmc_host_ops wrapper: bails out
 * early on pre-2.40a IP (no UHS_REG), otherwise delegates to
 * dw_mci_do_start_signal_voltage_switch().
 */
1939 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1940 struct mmc_ios *ios)
1942 struct dw_mci_slot *slot = mmc_priv(mmc);
1943 struct dw_mci *host = slot->host;
1946 if (host->verid < DW_MMC_240A)
1949 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * dw_mci_execute_tuning() - mmc_host_ops.execute_tuning.
 * Picks the tuning block pattern for CMD21 (HS200, width-dependent) or
 * CMD19 (SD), selects the clock-controller con_id by card type
 * (eMMC=3, SDIO=1, SD=0), and delegates to the SoC-specific
 * drv_data->execute_tuning() with sample-phase tuning (type 1).
 * RK3036/RK312x lack a 1.8 V IO domain, so tuning is skipped there.
 */
1955 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1957 struct dw_mci_slot *slot = mmc_priv(mmc);
1958 struct dw_mci *host = slot->host;
1959 const struct dw_mci_drv_data *drv_data = host->drv_data;
1960 struct dw_mci_tuning_data tuning_data;
1963 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1964 if(cpu_is_rk3036() || cpu_is_rk312x())
1967 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1968 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1969 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1970 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1971 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1972 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1973 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1977 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1978 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1979 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1982 "Undefined command(%d) for tuning\n", opcode);
1987 /* Recommend sample phase and delayline
1988 Fixme: Mix-use these three controllers will cause
1991 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1992 tuning_data.con_id = 3;
1993 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1994 tuning_data.con_id = 1;
1996 tuning_data.con_id = 0;
1998 /* 0: driver, from host->devices
1999 1: sample, from devices->host
2001 tuning_data.tuning_type = 1;
2003 if (drv_data && drv_data->execute_tuning)
2004 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/*
 * dw_mci_post_tmo() - after a request timeout, drop the in-flight mrq
 * reference and force the state machine back to idle so the next request
 * can start cleanly.
 */
2009 static void dw_mci_post_tmo(struct mmc_host *mmc)
2011 struct dw_mci_slot *slot = mmc_priv(mmc);
2012 struct dw_mci *host = slot->host;
2013 host->cur_slot->mrq = NULL;
2015 host->state = STATE_IDLE;
/* Host-operations table registered with the MMC core. */
2018 static const struct mmc_host_ops dw_mci_ops = {
2019 .request = dw_mci_request,
2020 .pre_req = dw_mci_pre_req,
2021 .post_req = dw_mci_post_req,
2022 .set_ios = dw_mci_set_ios,
2023 .get_ro = dw_mci_get_ro,
2024 .get_cd = dw_mci_get_cd,
2025 .set_sdio_status = dw_mci_set_sdio_status,
2026 .hw_reset = dw_mci_hw_reset,
2027 .enable_sdio_irq = dw_mci_enable_sdio_irq,
2028 .execute_tuning = dw_mci_execute_tuning,
2029 .post_tmo = dw_mci_post_tmo,
2030 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
2031 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
2032 .card_busy = dw_mci_card_busy,
/*
 * dw_mci_enable_irq() - enable/disable the host IRQ line, tracking the
 * current state in host->irq_state so enable_irq()/disable_irq() are
 * never called twice in a row for the same state (their depth counting
 * would otherwise get unbalanced).
 */
2037 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
2039 unsigned long flags;
2044 local_irq_save(flags);
2045 if(host->irq_state != irqflag)
2047 host->irq_state = irqflag;
2050 enable_irq(host->irq);
2054 disable_irq(host->irq);
2057 local_irq_restore(flags);
/*
 * dw_mci_deal_data_end() - post-transfer cleanup.  For host->card writes
 * (SEND_STATUS direction, excluding CMD19 bus test) translate any latched
 * data-status error bits (DCRC -> -EILSEQ, EBE -> -ETIMEDOUT) into
 * data->error, then wait for the controller busy bits to clear.
 */
2061 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2062 __releases(&host->lock)
2063 __acquires(&host->lock)
2065 if(DW_MCI_SEND_STATUS == host->dir_status){
2067 if( MMC_BUS_TEST_W != host->cmd->opcode){
2068 if(host->data_status & SDMMC_INT_DCRC)
2069 host->data->error = -EILSEQ;
2070 else if(host->data_status & SDMMC_INT_EBE)
2071 host->data->error = -ETIMEDOUT;
2073 dw_mci_wait_unbusy(host);
2076 dw_mci_wait_unbusy(host);
/*
 * dw_mci_request_end() - finish the current request and start the next.
 * Performs data-end cleanup, clears cur_slot->mrq, dequeues and starts
 * the next waiting slot (or returns to STATE_IDLE), then completes the
 * finished request via mmc_request_done() with host->lock dropped, since
 * the completion callback may re-enter the driver.
 */
2081 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2082 __releases(&host->lock)
2083 __acquires(&host->lock)
2085 struct dw_mci_slot *slot;
2086 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2088 //WARN_ON(host->cmd || host->data);
2090 dw_mci_deal_data_end(host, mrq);
2093 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2094 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2096 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2097 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2099 host->cur_slot->mrq = NULL;
2101 if (!list_empty(&host->queue)) {
2102 slot = list_entry(host->queue.next,
2103 struct dw_mci_slot, queue_node);
2104 list_del(&slot->queue_node);
2105 dev_vdbg(host->dev, "list not empty: %s is next\n",
2106 mmc_hostname(slot->mmc));
2107 host->state = STATE_SENDING_CMD;
2108 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2109 dw_mci_start_request(host, slot);
2111 dev_vdbg(host->dev, "list empty\n");
2112 host->state = STATE_IDLE;
/* Drop the lock across the completion callback to avoid deadlock. */
2115 spin_unlock(&host->lock);
2116 mmc_request_done(prev_mmc, mrq);
2117 spin_lock(&host->lock);
/*
 * dw_mci_command_complete() - read the command response registers and
 * translate latched interrupt-status error bits into cmd->error
 * (RTO -> -ETIMEDOUT, RCRC with CRC-checked response -> -EILSEQ).
 * For 136-bit responses (R2) the four RESP registers are read in
 * reverse order to match the core's resp[] layout.
 */
2120 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2122 u32 status = host->cmd_status;
2124 host->cmd_status = 0;
2126 /* Read the response from the card (up to 16 bytes) */
2127 if (cmd->flags & MMC_RSP_PRESENT) {
2128 if (cmd->flags & MMC_RSP_136) {
2129 cmd->resp[3] = mci_readl(host, RESP0);
2130 cmd->resp[2] = mci_readl(host, RESP1);
2131 cmd->resp[1] = mci_readl(host, RESP2);
2132 cmd->resp[0] = mci_readl(host, RESP3);
2134 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2135 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2137 cmd->resp[0] = mci_readl(host, RESP0);
2141 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2142 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
2146 if (status & SDMMC_INT_RTO)
2148 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2151 cmd->error = -ETIMEDOUT;
2152 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2153 cmd->error = -EILSEQ;
2154 }else if (status & SDMMC_INT_RESP_ERR){
2159 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2160 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* Response timeouts (except CMD13 polling) are rate-limited in the log
 * via the cmd_rto counter. */
2163 if(MMC_SEND_STATUS != cmd->opcode)
2164 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2165 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2166 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2170 /* newer ip versions need a delay between retries */
2171 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * dw_mci_tasklet_func() - bottom-half state machine driven by IRQ events.
 * Runs under host->lock and loops until the state stops changing:
 *   STATE_SENDING_CMD  -> command done; dispatch sbc follow-up, start
 *                         error recovery, or fall through to data phase.
 *   STATE_SENDING_DATA -> on EVENT_DATA_ERROR, stop DMA and issue a
 *                         manual CMD12 (open-ended recovery); otherwise
 *                         wait for transfer completion.
 *   STATE_DATA_BUSY    -> classify data_status errors into data->error,
 *                         reset the FIFO on error, then finish or send
 *                         the stop command.
 *   STATE_SENDING_STOP -> complete the stop command and end the request.
 *   STATE_DATA_ERROR   -> wait for transfer drain, then re-enter
 *                         STATE_DATA_BUSY.
 * NOTE(review): exact flow between cases depends on fall-through and
 * goto targets not all visible here; comments reflect the visible code.
 */
2177 static void dw_mci_tasklet_func(unsigned long priv)
2179 struct dw_mci *host = (struct dw_mci *)priv;
2180 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2181 struct mmc_data *data;
2182 struct mmc_command *cmd;
2183 enum dw_mci_state state;
2184 enum dw_mci_state prev_state;
2185 u32 status, cmd_flags;
2186 unsigned long timeout = 0;
2189 spin_lock(&host->lock);
2191 state = host->state;
2201 case STATE_SENDING_CMD:
2202 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2203 &host->pending_events))
2208 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2209 dw_mci_command_complete(host, cmd);
/* CMD23 (sbc) finished cleanly: immediately launch the real command. */
2210 if (cmd == host->mrq->sbc && !cmd->error) {
2211 prev_state = state = STATE_SENDING_CMD;
2212 __dw_mci_start_request(host, host->cur_slot,
2217 if (cmd->data && cmd->error) {
2218 dw_mci_stop_dma(host);
2221 send_stop_cmd(host, data);
2222 state = STATE_SENDING_STOP;
2225 /* host->data = NULL; */
2228 send_stop_abort(host, data);
2229 state = STATE_SENDING_STOP;
2232 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2235 if (!host->mrq->data || cmd->error) {
2236 dw_mci_request_end(host, host->mrq);
2240 prev_state = state = STATE_SENDING_DATA;
2243 case STATE_SENDING_DATA:
2244 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2245 dw_mci_stop_dma(host);
2248 send_stop_cmd(host, data);
2250 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2251 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2252 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
2254 mci_writel(host, CMDARG, 0);
2256 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2257 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2259 if(host->mmc->hold_reg_flag)
2260 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2262 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait (<= 500 ms) for the CMD start bit to self-clear. */
2264 timeout = jiffies + msecs_to_jiffies(500);
2267 ret = time_before(jiffies, timeout);
2268 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2272 MMC_DBG_ERR_FUNC(host->mmc,
2273 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2274 __func__, mmc_hostname(host->mmc));
2277 send_stop_abort(host, data);
2279 state = STATE_DATA_ERROR;
2283 MMC_DBG_CMD_FUNC(host->mmc,
2284 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2285 prev_state,state, mmc_hostname(host->mmc));
2287 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2288 &host->pending_events))
2290 MMC_DBG_INFO_FUNC(host->mmc,
2291 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2292 prev_state,state,mmc_hostname(host->mmc));
2294 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2295 prev_state = state = STATE_DATA_BUSY;
2298 case STATE_DATA_BUSY:
2299 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2300 &host->pending_events))
2303 dw_mci_deal_data_end(host, host->mrq);
2304 MMC_DBG_INFO_FUNC(host->mmc,
2305 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2306 prev_state,state,mmc_hostname(host->mmc));
2308 /* host->data = NULL; */
2309 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2310 status = host->data_status;
2312 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2313 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2314 MMC_DBG_ERR_FUNC(host->mmc,
2315 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2316 prev_state,state, status, mmc_hostname(host->mmc));
/* Map the data-status bits to errno in priority order. */
2318 if (status & SDMMC_INT_DRTO) {
2319 data->error = -ETIMEDOUT;
2320 } else if (status & SDMMC_INT_DCRC) {
2321 data->error = -EILSEQ;
2322 } else if (status & SDMMC_INT_EBE &&
2323 host->dir_status == DW_MCI_SEND_STATUS){
2325 * No data CRC status was returned.
2326 * The number of bytes transferred will
2327 * be exaggerated in PIO mode.
2329 data->bytes_xfered = 0;
2330 data->error = -ETIMEDOUT;
2339 * After an error, there may be data lingering
2340 * in the FIFO, so reset it - doing so
2341 * generates a block interrupt, hence setting
2342 * the scatter-gather pointer to NULL.
2344 dw_mci_fifo_reset(host);
2346 data->bytes_xfered = data->blocks * data->blksz;
2351 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2352 prev_state,state,mmc_hostname(host->mmc));
2353 dw_mci_request_end(host, host->mrq);
2356 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2357 prev_state,state,mmc_hostname(host->mmc));
/* With CMD23 the card stops itself: no CMD12 needed on success. */
2359 if (host->mrq->sbc && !data->error) {
2360 data->stop->error = 0;
2362 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2363 prev_state,state,mmc_hostname(host->mmc));
2365 dw_mci_request_end(host, host->mrq);
2369 prev_state = state = STATE_SENDING_STOP;
2371 send_stop_cmd(host, data);
2373 if (data->stop && !data->error) {
2374 /* stop command for open-ended transfer*/
2376 send_stop_abort(host, data);
2380 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2381 prev_state,state,mmc_hostname(host->mmc));
2383 case STATE_SENDING_STOP:
2384 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2387 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2388 prev_state, state, mmc_hostname(host->mmc));
2390 /* CMD error in data command */
2391 if (host->mrq->cmd->error && host->mrq->data) {
2392 dw_mci_fifo_reset(host);
2396 host->data = NULL; */
2398 dw_mci_command_complete(host, host->mrq->stop);
2400 if (host->mrq->stop)
2401 dw_mci_command_complete(host, host->mrq->stop);
2403 host->cmd_status = 0;
2406 dw_mci_request_end(host, host->mrq);
2409 case STATE_DATA_ERROR:
2410 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2411 &host->pending_events))
2414 state = STATE_DATA_BUSY;
2417 } while (state != prev_state);
2419 host->state = state;
2421 spin_unlock(&host->lock);
2425 /* push final bytes to part_buf, only use during push */
/* Stores the trailing (< FIFO-word-size) bytes of a push in part_buf. */
2426 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2428 memcpy((void *)&host->part_buf, buf, cnt);
2429 host->part_buf_count = cnt;
2432 /* append bytes to part_buf, only use during push */
/* Appends up to a full FIFO word into part_buf; returns bytes consumed. */
2433 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2435 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2436 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2437 host->part_buf_count += cnt;
2441 /* pull first bytes from part_buf, only use during pull */
/* Drains up to cnt leftover bytes from part_buf; returns bytes copied. */
2442 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2444 cnt = min(cnt, (int)host->part_buf_count);
2446 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2448 host->part_buf_count -= cnt;
2449 host->part_buf_start += cnt;
2454 /* pull final bytes from the part_buf, assuming it's just been filled */
/* Copies cnt bytes out and marks the remainder of the word as pending. */
2455 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2457 memcpy(buf, &host->part_buf, cnt);
2458 host->part_buf_start = cnt;
2459 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * dw_mci_push_data16() - PIO write path for a 16-bit FIFO.
 * Flushes any partial word from a previous call first, uses a bounce
 * buffer when the source is misaligned and the platform lacks efficient
 * unaligned access, streams aligned u16 words, then parks any trailing
 * byte in part_buf (flushed immediately if this completes the transfer).
 */
2462 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2464 struct mmc_data *data = host->data;
2467 /* try and push anything in the part_buf */
2468 if (unlikely(host->part_buf_count)) {
2469 int len = dw_mci_push_part_bytes(host, buf, cnt);
2472 if (host->part_buf_count == 2) {
2473 mci_writew(host, DATA(host->data_offset),
2475 host->part_buf_count = 0;
2478 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2479 if (unlikely((unsigned long)buf & 0x1)) {
2481 u16 aligned_buf[64];
2482 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2483 int items = len >> 1;
2485 /* memcpy from input buffer into aligned buffer */
2486 memcpy(aligned_buf, buf, len);
2489 /* push data from aligned buffer into fifo */
2490 for (i = 0; i < items; ++i)
2491 mci_writew(host, DATA(host->data_offset),
2498 for (; cnt >= 2; cnt -= 2)
2499 mci_writew(host, DATA(host->data_offset), *pdata++);
2502 /* put anything remaining in the part_buf */
2504 dw_mci_set_part_bytes(host, buf, cnt);
2505 /* Push data if we have reached the expected data length */
2506 if ((data->bytes_xfered + init_cnt) ==
2507 (data->blksz * data->blocks))
2508 mci_writew(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data16() - PIO read path for a 16-bit FIFO.
 * Mirrors push_data16: bounce buffer for misaligned destinations,
 * aligned u16 streaming otherwise, and a final partial word saved via
 * part_buf16/dw_mci_pull_final_bytes for the next call.
 */
2513 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2515 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2516 if (unlikely((unsigned long)buf & 0x1)) {
2518 /* pull data from fifo into aligned buffer */
2519 u16 aligned_buf[64];
2520 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2521 int items = len >> 1;
2523 for (i = 0; i < items; ++i)
2524 aligned_buf[i] = mci_readw(host,
2525 DATA(host->data_offset));
2526 /* memcpy from aligned buffer into output buffer */
2527 memcpy(buf, aligned_buf, len);
2535 for (; cnt >= 2; cnt -= 2)
2536 *pdata++ = mci_readw(host, DATA(host->data_offset));
2540 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2541 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push path for a 32-bit wide FIFO: same structure as the 16-bit
 * variant but with u32 granularity (4-byte part_buf flush threshold,
 * 4-byte alignment check, cnt & -4 rounding).
 * NOTE(review): excerpt — loop bodies/braces partially elided.
 */
2545 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2547 struct mmc_data *data = host->data;
2550 /* try and push anything in the part_buf */
2551 if (unlikely(host->part_buf_count)) {
2552 int len = dw_mci_push_part_bytes(host, buf, cnt);
2555 if (host->part_buf_count == 4) {
2556 mci_writel(host, DATA(host->data_offset),
2558 host->part_buf_count = 0;
2561 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2562 if (unlikely((unsigned long)buf & 0x3)) {
2564 u32 aligned_buf[32];
2565 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2566 int items = len >> 2;
2568 /* memcpy from input buffer into aligned buffer */
2569 memcpy(aligned_buf, buf, len);
2572 /* push data from aligned buffer into fifo */
2573 for (i = 0; i < items; ++i)
2574 mci_writel(host, DATA(host->data_offset),
2581 for (; cnt >= 4; cnt -= 4)
2582 mci_writel(host, DATA(host->data_offset), *pdata++);
2585 /* put anything remaining in the part_buf */
2587 dw_mci_set_part_bytes(host, buf, cnt);
2588 /* Push data if we have reached the expected data length */
2589 if ((data->bytes_xfered + init_cnt) ==
2590 (data->blksz * data->blocks))
2591 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull path for a 32-bit wide FIFO: aligned bounce buffer for
 * unaligned destinations, whole-u32 reads, then a final partial word
 * saved in part_buf32 and drained by dw_mci_pull_final_bytes.
 */
2596 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2598 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2599 if (unlikely((unsigned long)buf & 0x3)) {
2601 /* pull data from fifo into aligned buffer */
2602 u32 aligned_buf[32];
2603 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2604 int items = len >> 2;
2606 for (i = 0; i < items; ++i)
2607 aligned_buf[i] = mci_readl(host,
2608 DATA(host->data_offset));
2609 /* memcpy from aligned buffer into output buffer */
2610 memcpy(buf, aligned_buf, len);
2618 for (; cnt >= 4; cnt -= 4)
2619 *pdata++ = mci_readl(host, DATA(host->data_offset));
2623 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2624 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push path for a 64-bit wide FIFO: same structure as the 16/32-bit
 * variants but with u64 granularity (8-byte flush threshold, 8-byte
 * alignment check, cnt & -8 rounding).
 * NOTE(review): excerpt — loop bodies/braces partially elided.
 */
2628 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2630 struct mmc_data *data = host->data;
2633 /* try and push anything in the part_buf */
2634 if (unlikely(host->part_buf_count)) {
2635 int len = dw_mci_push_part_bytes(host, buf, cnt);
2639 if (host->part_buf_count == 8) {
2640 mci_writeq(host, DATA(host->data_offset),
2642 host->part_buf_count = 0;
2645 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2646 if (unlikely((unsigned long)buf & 0x7)) {
2648 u64 aligned_buf[16];
2649 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2650 int items = len >> 3;
2652 /* memcpy from input buffer into aligned buffer */
2653 memcpy(aligned_buf, buf, len);
2656 /* push data from aligned buffer into fifo */
2657 for (i = 0; i < items; ++i)
2658 mci_writeq(host, DATA(host->data_offset),
2665 for (; cnt >= 8; cnt -= 8)
2666 mci_writeq(host, DATA(host->data_offset), *pdata++);
2669 /* put anything remaining in the part_buf */
2671 dw_mci_set_part_bytes(host, buf, cnt);
2672 /* Push data if we have reached the expected data length */
2673 if ((data->bytes_xfered + init_cnt) ==
2674 (data->blksz * data->blocks))
2675 mci_writeq(host, DATA(host->data_offset),
/*
 * PIO pull path for a 64-bit wide FIFO: aligned bounce buffer for
 * unaligned destinations, whole-u64 reads, final partial word saved
 * in host->part_buf and drained by dw_mci_pull_final_bytes.
 */
2680 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2682 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2683 if (unlikely((unsigned long)buf & 0x7)) {
2685 /* pull data from fifo into aligned buffer */
2686 u64 aligned_buf[16];
2687 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2688 int items = len >> 3;
2690 for (i = 0; i < items; ++i)
2691 aligned_buf[i] = mci_readq(host,
2692 DATA(host->data_offset));
2693 /* memcpy from aligned buffer into output buffer */
2694 memcpy(buf, aligned_buf, len);
2702 for (; cnt >= 8; cnt -= 8)
2703 *pdata++ = mci_readq(host, DATA(host->data_offset));
2707 host->part_buf = mci_readq(host, DATA(host->data_offset));
2708 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Width-independent pull entry point: first satisfy the request from
 * buffered partial bytes; if that already covers @cnt, return early,
 * otherwise delegate the remainder to the FIFO-width-specific
 * host->pull_data callback (16/32/64-bit variants above).
 */
2712 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2716 /* get remaining partial bytes */
2717 len = dw_mci_pull_part_bytes(host, buf, cnt);
2718 if (unlikely(len == cnt))
2723 /* get the rest of the data */
2724 host->pull_data(host, buf, cnt);
/*
 * PIO read: walk the request's scatterlist with sg_miter, draining the
 * controller FIFO into each segment. @dto indicates a data-transfer-over
 * condition, in which case we keep reading while the FIFO still reports
 * data. On completion/abort, stop the miter and set XFER_COMPLETE.
 * NOTE(review): excerpt — do/while and error-path braces elided.
 */
2727 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2729 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2731 unsigned int offset;
2732 struct mmc_data *data = host->data;
2733 int shift = host->data_shift;
2736 unsigned int remain, fcnt;
/* Guard against a dropped bus reference (host already released). */
2738 if(!host->mmc->bus_refs){
2739 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2743 if (!sg_miter_next(sg_miter))
2746 host->sg = sg_miter->piter.sg;
2747 buf = sg_miter->addr;
2748 remain = sg_miter->length;
/* Bytes available = FIFO fill level (in FIFO words) plus buffered partial bytes. */
2752 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2753 << shift) + host->part_buf_count;
2754 len = min(remain, fcnt);
2757 dw_mci_pull_data(host, (void *)(buf + offset), len);
2758 data->bytes_xfered += len;
2763 sg_miter->consumed = offset;
2764 status = mci_readl(host, MINTSTS);
2765 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2766 /* if the RXDR is ready read again */
2767 } while ((status & SDMMC_INT_RXDR) ||
2768 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2771 if (!sg_miter_next(sg_miter))
2773 sg_miter->consumed = 0;
2775 sg_miter_stop(sg_miter);
2779 sg_miter_stop(sg_miter);
2783 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO write: mirror of dw_mci_read_data_pio — walk the scatterlist and
 * fill the FIFO with as many bytes as there is free space
 * (fifo_depth - fill level, minus already-buffered partial bytes),
 * looping while TXDR keeps firing.
 * NOTE(review): excerpt — do/while and error-path braces elided.
 */
2786 static void dw_mci_write_data_pio(struct dw_mci *host)
2788 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2790 unsigned int offset;
2791 struct mmc_data *data = host->data;
2792 int shift = host->data_shift;
2795 unsigned int fifo_depth = host->fifo_depth;
2796 unsigned int remain, fcnt;
/* Guard against a dropped bus reference (host already released). */
2798 if(!host->mmc->bus_refs){
2799 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2804 if (!sg_miter_next(sg_miter))
2807 host->sg = sg_miter->piter.sg;
2808 buf = sg_miter->addr;
2809 remain = sg_miter->length;
/* Free FIFO space in bytes, less what is still parked in part_buf. */
2813 fcnt = ((fifo_depth -
2814 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2815 << shift) - host->part_buf_count;
2816 len = min(remain, fcnt);
2819 host->push_data(host, (void *)(buf + offset), len);
2820 data->bytes_xfered += len;
2825 sg_miter->consumed = offset;
2826 status = mci_readl(host, MINTSTS);
2827 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2828 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2831 if (!sg_miter_next(sg_miter))
2833 sg_miter->consumed = 0;
2835 sg_miter_stop(sg_miter);
2839 sg_miter_stop(sg_miter);
2843 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Record a command-done interrupt status (first status wins), mark the
 * command complete, and kick the state-machine tasklet.
 */
2846 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2848 if (!host->cmd_status)
2849 host->cmd_status = status;
2856 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2857 tasklet_schedule(&host->tasklet);
/*
 * Top-level interrupt handler. Reads the masked interrupt status and
 * dispatches, in order: command errors, data errors, data-over (with a
 * final PIO drain for reads), RXDR/TXDR PIO service, voltage-switch
 * (VSI), command-done, card-detect (queued to card_workqueue), HLE,
 * per-slot SDIO interrupts, and finally internal-DMA (IDMAC) completion
 * on SoCs that use it. Each source is acked by writing RINTSTS.
 * NOTE(review): excerpt — braces and the return statement are elided.
 */
2860 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2862 struct dw_mci *host = dev_id;
2863 u32 pending, sdio_int;
2866 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2869 * DTO fix - version 2.10a and below, and only if internal DMA
/* Workaround: synthesize DATA_OVER when the FIFO count says transfer finished. */
2872 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2874 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2875 pending |= SDMMC_INT_DATA_OVER;
2879 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2880 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2881 host->cmd_status = pending;
2883 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2884 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2886 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2889 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2890 /* if there is an error report DATA_ERROR */
2891 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2892 host->data_status = pending;
2894 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2896 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2897 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2898 tasklet_schedule(&host->tasklet);
2901 if (pending & SDMMC_INT_DATA_OVER) {
2902 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2903 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2904 if (!host->data_status)
2905 host->data_status = pending;
/* For reads, drain whatever is left in the FIFO before completing. */
2907 if (host->dir_status == DW_MCI_RECV_STATUS) {
2908 if (host->sg != NULL)
2909 dw_mci_read_data_pio(host, true);
2911 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2912 tasklet_schedule(&host->tasklet);
2915 if (pending & SDMMC_INT_RXDR) {
2916 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2917 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2918 dw_mci_read_data_pio(host, false);
2921 if (pending & SDMMC_INT_TXDR) {
2922 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2923 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2924 dw_mci_write_data_pio(host);
2927 if (pending & SDMMC_INT_VSI) {
2928 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2929 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2930 dw_mci_cmd_interrupt(host, pending);
2933 if (pending & SDMMC_INT_CMD_DONE) {
2934 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2935 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2936 dw_mci_cmd_interrupt(host, pending);
2939 if (pending & SDMMC_INT_CD) {
2940 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2941 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
/* Hold a wake lock so the detect work can run before suspend. */
2942 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2943 queue_work(host->card_workqueue, &host->card_work);
2946 if (pending & SDMMC_INT_HLE) {
2947 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2948 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2952 /* Handle SDIO Interrupts */
2953 for (i = 0; i < host->num_slots; i++) {
2954 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in IP version 2.40a. */
2956 if (host->verid < DW_MMC_240A)
2957 sdio_int = SDMMC_INT_SDIO(i);
2959 sdio_int = SDMMC_INT_SDIO(i + 8);
2961 if (pending & sdio_int) {
2962 mci_writel(host, RINTSTS, sdio_int);
2963 mmc_signal_sdio_irq(slot->mmc);
2969 #ifdef CONFIG_MMC_DW_IDMAC
2970 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2971 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2972 /* Handle DMA interrupts */
2973 pending = mci_readl(host, IDSTS);
2974 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2975 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2976 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2977 host->dma_ops->complete((void *)host);
/*
 * Card-detect workqueue handler. For every slot: re-read card presence,
 * optionally retarget the SD data pins between functional and uart-debug
 * pinctrl states, and on a presence change reset the controller, fail
 * any in-flight request with -ENOMEDIUM, and notify the MMC core via
 * mmc_detect_change().
 * NOTE(review): excerpt — loop/switch braces and several statements are
 * elided in this view.
 */
2985 static void dw_mci_work_routine_card(struct work_struct *work)
2987 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2990 for (i = 0; i < host->num_slots; i++) {
2991 struct dw_mci_slot *slot = host->slot[i];
2992 struct mmc_host *mmc = slot->mmc;
2993 struct mmc_request *mrq;
2996 present = dw_mci_get_cd(mmc);
2998 /* Card insert, switch data line to uart function, and vice versa.
2999 Only audi chips need this software switch, selected via the udbg tag in dts!
/*
 * NOTE(review): the two dev_err strings below look swapped — selecting
 * pins_default logs "Udbg pinctrl setting failed" and selecting
 * pins_udbg logs "Default pinctrl setting failed". Confirm and swap
 * the messages in a code change.
 */
3001 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
3003 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3004 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3005 mmc_hostname(host->mmc));
3007 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3008 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3009 mmc_hostname(host->mmc));
3013 while (present != slot->last_detect_state) {
3014 dev_dbg(&slot->mmc->class_dev, "card %s\n",
3015 present ? "inserted" : "removed");
3016 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
3017 present ? "inserted" : "removed.", mmc_hostname(mmc));
3019 dw_mci_ctrl_all_reset(host);
3020 /* Stop edma when rountine card triggered */
3021 if(cpu_is_rk3036() || cpu_is_rk312x())
3022 if(host->dma_ops && host->dma_ops->stop)
3023 host->dma_ops->stop(host);
3024 rk_send_wakeup_key();//wake up system
3025 spin_lock_bh(&host->lock);
3027 /* Card change detected */
3028 slot->last_detect_state = present;
3030 /* Clean up queue if present */
/* Active request: fail it at whatever state the FSM is in. */
3033 if (mrq == host->mrq) {
3037 switch (host->state) {
3040 case STATE_SENDING_CMD:
3041 mrq->cmd->error = -ENOMEDIUM;
3045 case STATE_SENDING_DATA:
3046 mrq->data->error = -ENOMEDIUM;
3047 dw_mci_stop_dma(host);
3049 case STATE_DATA_BUSY:
3050 case STATE_DATA_ERROR:
3051 if (mrq->data->error == -EINPROGRESS)
3052 mrq->data->error = -ENOMEDIUM;
3056 case STATE_SENDING_STOP:
3057 mrq->stop->error = -ENOMEDIUM;
3061 dw_mci_request_end(host, mrq);
/* Queued-but-not-started request: fail it directly. */
3063 list_del(&slot->queue_node);
3064 mrq->cmd->error = -ENOMEDIUM;
3066 mrq->data->error = -ENOMEDIUM;
3068 mrq->stop->error = -ENOMEDIUM;
3070 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3071 mrq->cmd->opcode, mmc_hostname(mmc));
3073 spin_unlock(&host->lock);
3074 mmc_request_done(slot->mmc, mrq);
3075 spin_lock(&host->lock);
3079 /* Power down slot */
3081 /* Clear down the FIFO */
3082 dw_mci_fifo_reset(host);
3083 #ifdef CONFIG_MMC_DW_IDMAC
3084 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3085 dw_mci_idmac_reset(host);
3090 spin_unlock_bh(&host->lock);
/* Re-sample in case presence changed while we were working. */
3092 present = dw_mci_get_cd(mmc);
3095 mmc_detect_change(slot->mmc,
3096 msecs_to_jiffies(host->pdata->detect_delay_ms));
3101 /* given a slot id, find out the device node representing that slot */
/*
 * Walk the controller's DT children and return the child whose "reg"
 * property equals @slot; returns NULL-equivalent when dev/of_node is
 * missing (excerpt: return statements elided).
 */
3102 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3104 struct device_node *np;
3108 if (!dev || !dev->of_node)
3111 for_each_child_of_node(dev->of_node, np) {
3112 addr = of_get_property(np, "reg", &len);
3113 if (!addr || (len < sizeof(int)))
3115 if (be32_to_cpup(addr) == slot)
/* Map of per-slot DT quirk property names to driver quirk flag bits. */
3121 static struct dw_mci_of_slot_quirks {
3124 } of_slot_quirks[] = {
3126 .quirk = "disable-wp",
3127 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Accumulate quirk flag bits for @slot by checking which entries of
 * of_slot_quirks[] are present as properties on the slot's DT node.
 */
3131 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3133 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3138 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3139 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3140 quirks |= of_slot_quirks[idx].id;
3145 /* find out bus-width for a given slot */
/*
 * Read the "bus-width" DT property from the controller node (the
 * per-slot lookup is commented out); falls back to a default when the
 * property is absent (excerpt: default assignment/return elided).
 */
3146 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3148 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3154 if (of_property_read_u32(np, "bus-width", &bus_wd))
3155 dev_err(dev, "bus-width property not found, assuming width"
3161 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Look up the optional "pwr-gpios" DT entry, request it, and drive it
 * low (power-enable asserted). Missing entries are silently ignored.
 */
3162 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3164 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3170 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3172 /* Having a missing entry is valid; return silently */
3173 if (!gpio_is_valid(gpio))
3176 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3177 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3181 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3187 /* find the write protect gpio for a given slot; or -1 if none specified */
/*
 * Look up and request the optional per-slot "wp-gpios" DT entry.
 * Missing entries are silently ignored; request failure is warned.
 */
3188 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3190 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3196 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3198 /* Having a missing entry is valid; return silently */
3199 if (!gpio_is_valid(gpio))
3202 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3203 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3210 /* find the cd gpio for a given slot */
/*
 * Look up the optional "cd-gpios" DT entry on the controller node and
 * register it with the MMC core slot-gpio helper as the card-detect
 * line. Missing entries are silently ignored.
 */
3211 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3212 struct mmc_host *mmc)
3214 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3220 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3222 /* Having a missing entry is valid; return silently */
3223 if (!gpio_is_valid(gpio))
3226 if (mmc_gpio_request_cd(mmc, gpio, 0))
3227 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * Threaded IRQ handler for the GPIO card-detect line. Re-arms the
 * trigger polarity opposite to the current pin level (so the next edge
 * fires), wakes the system, and queues the card-detect work unless
 * rescan is disabled (suspend path handles it instead).
 */
3230 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3232 struct mmc_host *mmc = dev_id;
3233 struct dw_mci_slot *slot = mmc_priv(mmc);
3234 struct dw_mci *host = slot->host;
3235 int gpio_cd = slot->cd_gpio;
/* Flip trigger level so we catch the opposite transition next time. */
3237 (gpio_get_value(gpio_cd) == 0) ?
3238 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
3239 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
3241 /* wakeup system whether gpio debounce or not */
3242 rk_send_wakeup_key();
3244 /* no need to trigger detect flow when rescan is disabled.
3245 This case happended in dpm, that we just wakeup system and
3246 let suspend_post notify callback handle it.
3248 if(mmc->rescan_disable == 0)
3249 queue_work(host->card_workqueue, &host->card_work);
3251 printk("%s: rescan been disabled!\n", __FUNCTION__);
/*
 * Convert the card-detect GPIO to an IRQ and install
 * dw_mci_gpio_cd_irqt as a threaded handler (initially low-level
 * triggered, oneshot), then mark it as a wakeup source so card
 * insertion can wake the system from suspend.
 */
3256 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3257 struct mmc_host *mmc)
3259 struct dw_mci_slot *slot = mmc_priv(mmc);
3260 struct dw_mci *host = slot->host;
3264 /* Having a missing entry is valid; return silently */
3265 if (!gpio_is_valid(gpio))
3268 irq = gpio_to_irq(gpio);
3270 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3271 NULL, dw_mci_gpio_cd_irqt,
3272 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
3276 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3278 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3279 enable_irq_wake(irq);
3282 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/*
 * Teardown counterpart of dw_mci_of_set_cd_gpio_irq: free the
 * card-detect IRQ and release the GPIO (no-op for invalid GPIOs).
 */
3286 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3287 struct mmc_host *mmc)
3289 if (!gpio_is_valid(gpio))
3292 if (gpio_to_irq(gpio) >= 0) {
3293 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3294 devm_gpio_free(&mmc->class_dev, gpio);
3297 #else /* CONFIG_OF */
/* Stubs used when the kernel is built without device-tree support;
 * each returns a neutral/empty value (excerpt: bodies elided). */
3298 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3302 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3306 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3310 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3314 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3315 struct mmc_host *mmc)
3319 #endif /* CONFIG_OF */
3321 /* @host: dw_mci host private data
3322 * Init pinctrl for each platform. Usually we assign
3323 * "default" tag for functional usage, "idle" tag for gpio
3324 * state and "udbg" tag for uart_dbg if any.
/*
 * Looks up the "idle", "default" and (for SD slots) "udbg" pinctrl
 * states, applying idle then default; when no card is present the SD
 * pins are switched to the uart-debug state. eMMC is left untouched.
 */
3326 static void dw_mci_init_pinctrl(struct dw_mci *host)
3328 /* Fixme: DON'T TOUCH EMMC SETTING! */
3329 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3332 /* Get pinctrl for DTS */
3333 host->pinctrl = devm_pinctrl_get(host->dev);
3334 if (IS_ERR(host->pinctrl)) {
3335 dev_err(host->dev, "%s: No pinctrl used!\n",
3336 mmc_hostname(host->mmc));
3340 /* Lookup idle state */
3341 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3342 PINCTRL_STATE_IDLE);
3343 if (IS_ERR(host->pins_idle)) {
3344 dev_err(host->dev, "%s: No idle tag found!\n",
3345 mmc_hostname(host->mmc));
3347 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3348 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3349 mmc_hostname(host->mmc));
3352 /* Lookup default state */
3353 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3354 PINCTRL_STATE_DEFAULT);
3355 if (IS_ERR(host->pins_default)) {
3356 dev_err(host->dev, "%s: No default pinctrl found!\n",
3357 mmc_hostname(host->mmc));
3359 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3360 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3361 mmc_hostname(host->mmc));
3364 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3365 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3366 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3367 if (IS_ERR(host->pins_udbg)) {
3368 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3369 mmc_hostname(host->mmc));
/* No card present: hand the SD data pins over to uart_dbg. */
3371 if (!dw_mci_get_cd(host->mmc))
3372 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3373 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3374 mmc_hostname(host->mmc));
/*
 * PM notifier for removable-SD hosts: on suspend-prepare, disable
 * rescans and cancel any pending detect work (releasing its wake lock);
 * on resume, re-enable rescans and trigger a detect pass.
 */
3379 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3380 unsigned long mode, void *unused)
3382 struct mmc_host *host = container_of(
3383 notify_block, struct mmc_host, pm_notify);
3384 unsigned long flags;
3387 case PM_HIBERNATION_PREPARE:
3388 case PM_SUSPEND_PREPARE:
3389 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3390 spin_lock_irqsave(&host->lock, flags);
3391 host->rescan_disable = 1;
3392 spin_unlock_irqrestore(&host->lock, flags);
3393 if (cancel_delayed_work(&host->detect))
3394 wake_unlock(&host->detect_wake_lock);
3397 case PM_POST_SUSPEND:
3398 case PM_POST_HIBERNATION:
3399 case PM_POST_RESTORE:
3400 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3401 spin_lock_irqsave(&host->lock, flags);
3402 host->rescan_disable = 0;
3403 spin_unlock_irqrestore(&host->lock, flags);
3404 mmc_detect_change(host, 10);
/*
 * Allocate and register one mmc_host for slot @id: parse DT caps/freq
 * limits, register the PM notifier for SD, hook up GPIO card-detect on
 * selected SoCs, set OCR mask and block-layer limits, acquire and
 * enable the vmmc regulator for SD, init pinctrl, and finally
 * mmc_add_host(). Error paths unwind the PM notifier and cd-gpio IRQ.
 * NOTE(review): excerpt — many braces, labels and statements elided.
 */
3410 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3412 struct mmc_host *mmc;
3413 struct dw_mci_slot *slot;
3414 const struct dw_mci_drv_data *drv_data = host->drv_data;
3419 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3423 slot = mmc_priv(mmc);
3427 host->slot[id] = slot;
3430 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3432 mmc->ops = &dw_mci_ops;
/* Clock limits: DT "clock-freq-min-max" overrides the driver defaults. */
3434 if (of_property_read_u32_array(host->dev->of_node,
3435 "clock-freq-min-max", freq, 2)) {
3436 mmc->f_min = DW_MCI_FREQ_MIN;
3437 mmc->f_max = DW_MCI_FREQ_MAX;
3439 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3440 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3442 mmc->f_min = freq[0];
3443 mmc->f_max = freq[1];
3445 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3446 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3449 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
/* Restrict the slot to one card type based on DT "supports-*" flags. */
3451 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3452 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3453 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3454 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3455 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3456 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3458 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3459 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3460 if (register_pm_notifier(&mmc->pm_notify)) {
3461 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3462 goto err_pm_notifier;
3466 /* We assume only low-level chip use gpio_cd */
3467 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
3468 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3469 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3470 if (gpio_is_valid(slot->cd_gpio)) {
3471 /* Request gpio int for card detection */
3472 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3474 slot->cd_gpio = -ENODEV;
3475 dev_err(host->dev, "failed to get your cd-gpios!\n");
/* OCR mask: platform callback wins, otherwise advertise a wide range. */
3479 if (host->pdata->get_ocr)
3480 mmc->ocr_avail = host->pdata->get_ocr(id);
3483 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3484 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3485 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3486 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3490 * Start with slot power disabled, it will be enabled when a card
3493 if (host->pdata->setpower)
3494 host->pdata->setpower(id, 0);
3496 if (host->pdata->caps)
3497 mmc->caps = host->pdata->caps;
3499 if (host->pdata->pm_caps)
3500 mmc->pm_caps = host->pdata->pm_caps;
/* Controller instance id comes from the "mshc" DT alias, else pdev id. */
3502 if (host->dev->of_node) {
3503 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3507 ctrl_id = to_platform_device(host->dev)->id;
3509 if (drv_data && drv_data->caps)
3510 mmc->caps |= drv_data->caps[ctrl_id];
3511 if (drv_data && drv_data->hold_reg_flag)
3512 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3514 /* set the compatibility of driver. */
3515 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3516 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3518 if (host->pdata->caps2)
3519 mmc->caps2 = host->pdata->caps2;
3521 if (host->pdata->get_bus_wd)
3522 bus_width = host->pdata->get_bus_wd(slot->id);
3523 else if (host->dev->of_node)
3524 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3528 switch (bus_width) {
3530 mmc->caps |= MMC_CAP_8_BIT_DATA;
3532 mmc->caps |= MMC_CAP_4_BIT_DATA;
/* Translate optional DT capability flags into mmc caps/pm_caps. */
3535 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3536 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3537 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3538 mmc->caps |= MMC_CAP_SDIO_IRQ;
3539 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3540 mmc->caps |= MMC_CAP_HW_RESET;
3541 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3542 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3543 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3544 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3545 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3546 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3547 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3548 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3550 /*Assign pm_caps pass to pm_flags*/
3551 mmc->pm_flags = mmc->pm_caps;
/* Block-layer transfer limits: platform settings, else DMA-mode defaults. */
3553 if (host->pdata->blk_settings) {
3554 mmc->max_segs = host->pdata->blk_settings->max_segs;
3555 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3556 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3557 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3558 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3560 /* Useful defaults if platform data is unset. */
3561 #ifdef CONFIG_MMC_DW_IDMAC
3562 mmc->max_segs = host->ring_size;
3563 mmc->max_blk_size = 65536;
3564 mmc->max_blk_count = host->ring_size;
3565 mmc->max_seg_size = 0x1000;
3566 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3567 if(cpu_is_rk3036() || cpu_is_rk312x()){
3568 /* fixup for external dmac setting */
3570 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3571 mmc->max_blk_count = 65535;
3572 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3573 mmc->max_seg_size = mmc->max_req_size;
3577 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3578 mmc->max_blk_count = 512;
3579 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3580 mmc->max_seg_size = mmc->max_req_size;
3581 #endif /* CONFIG_MMC_DW_IDMAC */
3585 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3587 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* Only the SD slot uses an external vmmc regulator. */
3592 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3593 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3597 if (IS_ERR(host->vmmc)) {
3598 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3601 ret = regulator_enable(host->vmmc);
3604 "failed to enable regulator: %d\n", ret);
3611 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3613 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3614 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3616 dw_mci_init_pinctrl(host);
3617 ret = mmc_add_host(mmc);
3621 #if defined(CONFIG_DEBUG_FS)
3622 dw_mci_init_debugfs(slot);
3625 /* Card initially undetected */
3626 slot->last_detect_state = 1;
/* Error unwinding: undo PM notifier and cd-gpio IRQ registration. */
3630 unregister_pm_notifier(&mmc->pm_notify);
3633 if (gpio_is_valid(slot->cd_gpio))
3634 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * Tear down one slot: run the platform exit hook, unregister the
 * mmc_host, clear the slot pointer, and free the host allocation.
 */
3639 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3641 /* Shutdown detect IRQ */
3642 if (slot->host->pdata->exit)
3643 slot->host->pdata->exit(id);
3645 /* Debugfs stuff is cleaned up by mmc core */
3646 mmc_remove_host(slot->mmc);
3647 slot->host->slot[id] = NULL;
3648 mmc_free_host(slot->mmc);
/*
 * Set up DMA: allocate a page of coherent memory for descriptor/sg
 * translation, pick external (rk3036/rk312x) or internal (IDMAC) DMA
 * ops, verify the ops table is complete, and call its init hook.
 * Any failure falls back to PIO mode.
 * NOTE(review): excerpt — failure-path braces/returns elided.
 */
3651 static void dw_mci_init_dma(struct dw_mci *host)
3653 /* Alloc memory for sg translation */
3654 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3655 &host->sg_dma, GFP_KERNEL);
3656 if (!host->sg_cpu) {
3657 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3662 /* Determine which DMA interface to use */
3663 #if defined(CONFIG_MMC_DW_IDMAC)
3664 if(cpu_is_rk3036() || cpu_is_rk312x()){
3665 host->dma_ops = &dw_mci_edmac_ops;
3666 dev_info(host->dev, "Using external DMA controller.\n");
3668 host->dma_ops = &dw_mci_idmac_ops;
3669 dev_info(host->dev, "Using internal DMA controller.\n");
3676 if (host->dma_ops->init && host->dma_ops->start &&
3677 host->dma_ops->stop && host->dma_ops->cleanup) {
3678 if (host->dma_ops->init(host)) {
3679 dev_err(host->dev, "%s: Unable to initialize "
3680 "DMA Controller.\n", __func__);
3684 dev_err(host->dev, "DMA initialization not found.\n");
3692 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Assert the given @reset bit(s) in CTRL and poll (up to 500 ms) for
 * the hardware to clear them; logs a timeout otherwise. Returns
 * success/failure (excerpt: return statements elided).
 */
3697 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3699 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3702 ctrl = mci_readl(host, CTRL);
3704 mci_writel(host, CTRL, ctrl);
3706 /* wait till resets clear */
3708 ctrl = mci_readl(host, CTRL);
3709 if (!(ctrl & reset))
3711 } while (time_before(jiffies, timeout));
3714 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * Reset only the controller FIFO. The sg_miter is stopped first
 * because the reset raises an interrupt while the scatter-gather
 * pointer must already be quiesced.
 */
3720 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3723 * Reseting generates a block interrupt, hence setting
3724 * the scatter-gather pointer to NULL.
3727 sg_miter_stop(&host->sg_miter);
3731 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full controller reset: FIFO, controller core and DMA in one call. */
3734 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3736 return dw_mci_ctrl_reset(host,
3737 SDMMC_CTRL_FIFO_RESET |
3739 SDMMC_CTRL_DMA_RESET);
/* Map of controller-level DT quirk property names to quirk flag bits. */
3744 static struct dw_mci_of_quirks {
3749 .quirk = "broken-cd",
3750 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board from the controller's device-tree node:
 * slot count, quirks, fifo depth, detect delay, bus clock, optional
 * driver-specific parse_dt hook, then capability flags derived from
 * the various "supports-*"/"caps2-*" properties. Returns ERR_PTR on
 * allocation or hook failure (excerpt: final return elided).
 */
3754 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3756 struct dw_mci_board *pdata;
3757 struct device *dev = host->dev;
3758 struct device_node *np = dev->of_node;
3759 const struct dw_mci_drv_data *drv_data = host->drv_data;
3761 u32 clock_frequency;
3763 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3765 dev_err(dev, "could not allocate memory for pdata\n");
3766 return ERR_PTR(-ENOMEM);
3769 /* find out number of slots supported */
3770 if (of_property_read_u32(dev->of_node, "num-slots",
3771 &pdata->num_slots)) {
3772 dev_info(dev, "num-slots property not found, "
3773 "assuming 1 slot is available\n");
3774 pdata->num_slots = 1;
3778 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3779 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3780 pdata->quirks |= of_quirks[idx].id;
3783 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3784 dev_info(dev, "fifo-depth property not found, using "
3785 "value of FIFOTH register as default\n");
3787 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3789 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3790 pdata->bus_hz = clock_frequency;
/* Give the SoC-specific driver a chance to parse its own properties. */
3792 if (drv_data && drv_data->parse_dt) {
3793 ret = drv_data->parse_dt(host);
3795 return ERR_PTR(ret);
3798 if (of_find_property(np, "keep-power-in-suspend", NULL))
3799 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3801 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3802 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3804 if (of_find_property(np, "supports-highspeed", NULL))
3805 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3807 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3808 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3810 if (of_find_property(np, "supports-DDR_MODE", NULL))
3811 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3813 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3814 pdata->caps2 |= MMC_CAP2_HS200;
3816 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3817 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3819 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3820 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3822 if (of_get_property(np, "cd-inverted", NULL))
3823 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3824 if (of_get_property(np, "bootpart-no-access", NULL))
3825 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3830 #else /* CONFIG_OF */
/* Without device-tree support there is no way to build pdata here. */
3831 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3833 return ERR_PTR(-EINVAL);
3835 #endif /* CONFIG_OF */
/*
 * dw_mci_probe() - common bring-up for the DesignWare MCI controller core.
 *
 * Sequence visible here: parse platform data / DT, detect the IP version
 * (which moves the DATA register offset from 2.40a on), acquire and enable
 * the AHB (hclk_mmc) and card (clk_mmc) clocks, size the host data width
 * and FIFO from HCON/FIFOTH, reset the controller, set up DMA, install the
 * IRQ handler, and initialize each slot before unmasking interrupts.
 *
 * Returns 0 on success or a negative errno; on failure, resources acquired
 * so far are released via the error labels at the bottom.
 *
 * NOTE(review): interior lines (braces, goto targets, some statements) are
 * elided from this view of the file.
 */
3837 int dw_mci_probe(struct dw_mci *host)
3839 const struct dw_mci_drv_data *drv_data = host->drv_data;
3840 int width, i, ret = 0;
3846 host->pdata = dw_mci_parse_dt(host);
3847 if (IS_ERR(host->pdata)) {
3848 dev_err(host->dev, "platform data not available\n");
/* A multi-slot controller needs a board-supplied slot mux callback. */
3853 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3855 "Platform data must supply select_slot function\n");
3860 * In 2.40a spec, Data offset is changed.
3861 * Need to check the version-id and set data-offset for DATA register.
3863 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3864 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3866 if (host->verid < DW_MMC_240A)
3867 host->data_offset = DATA_OFFSET;
3869 host->data_offset = DATA_240A_OFFSET;
/* AHB bus clock for register access. */
3872 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3873 if (IS_ERR(host->hclk_mmc)) {
3874 dev_err(host->dev, "failed to get hclk_mmc\n");
3875 ret = PTR_ERR(host->hclk_mmc);
3879 clk_prepare_enable(host->hclk_mmc);
/* Card interface (CIU) clock. */
3882 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3883 if (IS_ERR(host->clk_mmc)) {
3884 dev_err(host->dev, "failed to get clk mmc_per\n");
3885 ret = PTR_ERR(host->clk_mmc);
3889 host->bus_hz = host->pdata->bus_hz;
3890 if (!host->bus_hz) {
3891 dev_err(host->dev,"Platform data must supply bus speed\n");
3896 if (host->verid < DW_MMC_240A)
3897 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3899 //rockchip: a fixed divide-by-2 sits in the clock path before the controller, so request twice bus_hz
3900 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3903 dev_err(host->dev, "failed to set clk mmc\n");
3906 clk_prepare_enable(host->clk_mmc);
/* SoC-variant hook for any implementation-specific clock setup. */
3908 if (drv_data && drv_data->setup_clock) {
3909 ret = drv_data->setup_clock(host);
3912 "implementation specific clock setup failed\n");
3917 host->quirks = host->pdata->quirks;
3918 host->irq_state = true;
3919 host->set_speed = 0;
3921 host->svi_flags = 0;
3923 spin_lock_init(&host->lock);
3924 spin_lock_init(&host->slock);
3926 INIT_LIST_HEAD(&host->queue);
3928 * Get the host data width - this assumes that HCON has been set with
3929 * the correct values.
3931 i = (mci_readl(host, HCON) >> 7) & 0x7;
3933 host->push_data = dw_mci_push_data16;
3934 host->pull_data = dw_mci_pull_data16;
3936 host->data_shift = 1;
3937 } else if (i == 2) {
3938 host->push_data = dw_mci_push_data64;
3939 host->pull_data = dw_mci_pull_data64;
3941 host->data_shift = 3;
3943 /* Check for a reserved value, and warn if it is */
3945 "HCON reports a reserved host data width!\n"
3946 "Defaulting to 32-bit access.\n");
3947 host->push_data = dw_mci_push_data32;
3948 host->pull_data = dw_mci_pull_data32;
3950 host->data_shift = 2;
3953 /* Reset all blocks */
3954 if (!dw_mci_ctrl_all_reset(host))
3957 host->dma_ops = host->pdata->dma_ops;
3958 dw_mci_init_dma(host);
3960 /* Clear the interrupts for the host controller */
3961 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3962 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3964 /* Put in max timeout */
3965 mci_writel(host, TMOUT, 0xFFFFFFFF);
3968 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3969 * Tx Mark = fifo_size / 2 DMA Size = 8
3971 if (!host->pdata->fifo_depth) {
3973 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3974 * have been overwritten by the bootloader, just like we're
3975 * about to do, so if you know the value for your hardware, you
3976 * should put it in the platform data.
3978 fifo_size = mci_readl(host, FIFOTH);
3979 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3981 fifo_size = host->pdata->fifo_depth;
3983 host->fifo_depth = fifo_size;
3985 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3986 mci_writel(host, FIFOTH, host->fifoth_val);
3988 /* disable clock to CIU */
3989 mci_writel(host, CLKENA, 0);
3990 mci_writel(host, CLKSRC, 0);
/* Bottom-half tasklet for request completion; workqueue for card events. */
3992 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3993 host->card_workqueue = alloc_workqueue("dw-mci-card",
3994 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
3995 if (!host->card_workqueue) {
3999 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
4000 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
4001 host->irq_flags, "dw-mci", host);
/* Slot count: platform data wins, otherwise read it out of HCON. */
4005 if (host->pdata->num_slots)
4006 host->num_slots = host->pdata->num_slots;
4008 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
4010 /* We need at least one slot to succeed */
4011 for (i = 0; i < host->num_slots; i++) {
4012 ret = dw_mci_init_slot(host, i);
4014 dev_dbg(host->dev, "slot %d init failed\n", i);
4020 * Enable interrupts for command done, data over, data empty, card det,
4021 * receive ready and error such as transmit, receive timeout, crc error
4023 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4024 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
4025 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect interrupt only makes sense for removable (SD) cards. */
4026 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
4027 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
4028 regs |= SDMMC_INT_CD;
4030 mci_writel(host, INTMASK, regs);
4032 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4034 dev_info(host->dev, "DW MMC controller at irq %d, "
4035 "%d bit host data width, "
4037 host->irq, width, fifo_size);
4040 dev_info(host->dev, "%d slots initialized\n", init_slots);
4042 dev_dbg(host->dev, "attempted to initialize %d slots, "
4043 "but failed on all\n", host->num_slots);
4048 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4049 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* Error unwind: release workqueue, DMA, regulator and clocks in reverse. */
4054 destroy_workqueue(host->card_workqueue);
4057 if (host->use_dma && host->dma_ops->exit)
4058 host->dma_ops->exit(host);
4061 regulator_disable(host->vmmc);
4062 regulator_put(host->vmmc);
4066 if (!IS_ERR(host->clk_mmc))
4067 clk_disable_unprepare(host->clk_mmc);
4069 if (!IS_ERR(host->hclk_mmc))
4070 clk_disable_unprepare(host->hclk_mmc);
4074 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove() - tear down a controller previously set up by
 * dw_mci_probe(): mask/clear interrupts, clean up each slot, gate the CIU
 * clock, free the card workqueue, DMA, card-detect GPIO IRQ, regulator and
 * clocks.  Mirrors the probe error path, in reverse order.
 *
 * NOTE(review): interior lines (braces, some statements) are elided from
 * this view of the file.
 */
4076 void dw_mci_remove(struct dw_mci *host)
4078 struct mmc_host *mmc = host->mmc;
4079 struct dw_mci_slot *slot = mmc_priv(mmc);
/* Quiesce the controller before touching the slots. */
4082 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4083 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4085 for(i = 0; i < host->num_slots; i++){
4086 dev_dbg(host->dev, "remove slot %d\n", i);
4088 dw_mci_cleanup_slot(host->slot[i], i);
4091 /* disable clock to CIU */
4092 mci_writel(host, CLKENA, 0);
4093 mci_writel(host, CLKSRC, 0);
4095 destroy_workqueue(host->card_workqueue);
/* SD hosts registered a PM notifier at probe time; drop it here. */
4096 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4097 unregister_pm_notifier(&host->mmc->pm_notify);
4099 if(host->use_dma && host->dma_ops->exit)
4100 host->dma_ops->exit(host);
4102 if (gpio_is_valid(slot->cd_gpio))
4103 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4106 regulator_disable(host->vmmc);
4107 regulator_put(host->vmmc);
4109 if(!IS_ERR(host->clk_mmc))
4110 clk_disable_unprepare(host->clk_mmc);
4112 if(!IS_ERR(host->hclk_mmc))
4113 clk_disable_unprepare(host->hclk_mmc);
4115 EXPORT_SYMBOL(dw_mci_remove);
4119 #ifdef CONFIG_PM_SLEEP
4121 * TODO: we should probably disable the clock to the card in the suspend path.
4123 extern int get_wifi_chip_type(void);
/*
 * dw_mci_suspend() - system-suspend hook.
 *
 * For SDIO hosts driving certain Wi-Fi chips (ESP8089 or anything beyond
 * the AP6xxx series) suspend is skipped entirely so the module keeps power.
 * For the SD controller: disable the IRQ, switch pins to their idle state,
 * mask/clear controller interrupts, and (except on rk3126/rk3126b/rk3036,
 * which already use GPIO card-detect) rebind CD to a GPIO and arm it as a
 * wakeup source.
 *
 * NOTE(review): interior lines (braces, return statements) are elided from
 * this view of the file.
 */
4124 int dw_mci_suspend(struct dw_mci *host)
4126 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4127 (get_wifi_chip_type() == WIFI_ESP8089 || get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
4131 regulator_disable(host->vmmc);
4133 /*only for sdmmc controller*/
4134 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4135 disable_irq(host->irq);
4136 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4137 MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
4138 mmc_hostname(host->mmc));
/* Mask and clear everything; controller is about to lose context. */
4140 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4141 mci_writel(host, INTMASK, 0x00);
4142 mci_writel(host, CTRL, 0x00);
4144 /* Soc rk3126/3036 already in gpio_cd mode */
4145 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4146 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4147 enable_irq_wake(host->mmc->slot.cd_irq);
4152 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume() - system-resume hook; inverse of dw_mci_suspend().
 *
 * Skips resume for the same SDIO Wi-Fi chips that skipped suspend, and for
 * SDIO hosts with no card present.  For the SD controller it restores the
 * default pinctrl state and (on SoCs not using GPIO card-detect) releases
 * the CD wakeup GPIO.  It then switches the card-detect mux back to the
 * controller via GRF writes (SoC-specific registers/bits), re-enables the
 * vmmc regulator, resets the controller, re-inits DMA, restores FIFOTH and
 * timeout, unmasks interrupts, and re-runs set_ios/setup_bus for slots
 * that kept power.
 *
 * NOTE(review): interior lines (braces, return statements, the rk3288
 * cpu check before the first grf_writel) are elided from this view.
 */
4154 int dw_mci_resume(struct dw_mci *host)
4156 int i, ret, retry_cnt = 0;
4158 struct dw_mci_slot *slot;
4160 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4161 (get_wifi_chip_type() == WIFI_ESP8089 || get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
/* SDIO with no card present: nothing to restore. */
4166 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
4167 slot = mmc_priv(host->mmc);
4168 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4172 /*only for sdmmc controller*/
4173 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4174 /* Soc rk3126/3036 already in gpio_cd mode */
4175 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4176 disable_irq_wake(host->mmc->slot.cd_irq);
4177 mmc_gpio_free_cd(host->mmc);
4179 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4180 MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
4181 mmc_hostname(host->mmc));
/* Route card-detect back to the controller (write-mask in the upper 16 bits). */
4185 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4186 else if(cpu_is_rk3036())
4187 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4188 else if(cpu_is_rk312x())
4189 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4190 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4193 ret = regulator_enable(host->vmmc);
4196 "failed to enable regulator: %d\n", ret);
4201 if(!dw_mci_ctrl_all_reset(host)){
/* rk3036/rk312x skip DMA re-init here (PIO-only path on those SoCs?) — TODO confirm */
4206 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
4207 if(host->use_dma && host->dma_ops->init)
4208 host->dma_ops->init(host);
4211 * Restore the initial value at FIFOTH register
4212 * And Invalidate the prev_blksz with zero
4214 mci_writel(host, FIFOTH, host->fifoth_val);
4215 host->prev_blksz = 0;
4216 /* Put in max timeout */
4217 mci_writel(host, TMOUT, 0xFFFFFFFF);
4219 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4220 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
4222 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4223 regs |= SDMMC_INT_CD;
4224 mci_writel(host, INTMASK, regs);
4225 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4226 /*only for sdmmc controller*/
/* Re-enable the IRQ only on the first (non-retry) pass. */
4227 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
4228 enable_irq(host->irq);
4231 for(i = 0; i < host->num_slots; i++){
4232 struct dw_mci_slot *slot = host->slot[i];
4235 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4236 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4237 dw_mci_setup_bus(slot, true);
4243 EXPORT_SYMBOL(dw_mci_resume);
4244 #endif /* CONFIG_PM_SLEEP */
/* Module init: just announces the driver; probing is done per-device. */
4246 static int __init dw_mci_init(void)
4248 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
/* Module exit: nothing to undo here (per-device teardown is dw_mci_remove). */
4252 static void __exit dw_mci_exit(void)
4256 module_init(dw_mci_init);
4257 module_exit(dw_mci_exit);
4259 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4260 MODULE_AUTHOR("NXP Semiconductor VietNam");
4261 MODULE_AUTHOR("Imagination Technologies Ltd");
4262 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4263 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4264 MODULE_LICENSE("GPL v2");