2 * Rockchip eMMC Interface driver
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
10 #include <linux/blkdev.h>
11 #include <linux/clk.h>
12 #include <linux/debugfs.h>
13 #include <linux/device.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/err.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/ioport.h>
19 #include <linux/module.h>
20 #include <linux/platform_device.h>
21 #include <linux/scatterlist.h>
22 #include <linux/seq_file.h>
23 #include <linux/slab.h>
24 #include <linux/stat.h>
25 #include <linux/delay.h>
26 #include <linux/irq.h>
27 #include <linux/mmc/host.h>
28 #include <linux/mmc/mmc.h>
29 #include <linux/bitops.h>
32 #include <mach/dma-pl330.h>
33 #include <asm/scatterlist.h>
34 #include <mach/iomux.h>
35 #include <mach/board.h>
/* DMA client handle handed to the rk29 PL330 DMA API in dma_init(). */
38 struct rk29_dma_client mmc_client;
/*
 * Forward declaration: maps (or re-uses a pre-mapped) scatterlist for DMA.
 * NOTE(review): the declaration is truncated in this view — the final
 * parameter line (the "next" flag, see the definition below) is missing.
 */
40 static int rk_mmc_pre_dma_transfer(struct rk_mmc *host,
41 struct mmc_data *data,
/*
 * Dump every visible controller register at info level for debugging.
 * NOTE(review): body braces/return are missing from this view of the file.
 */
44 static int rk_mmc_show_regs(struct rk_mmc *host)
46 mmc_info(host, "CTRL: 0x%08x\n", mmc_readl(host, CTRL));
47 mmc_info(host, "PWREN: 0x%08x\n", mmc_readl(host, PWREN));
48 mmc_info(host, "CLKDIV: 0x%08x\n", mmc_readl(host, CLKDIV));
49 mmc_info(host, "CLKENA: 0x%08x\n", mmc_readl(host, CLKENA));
50 mmc_info(host, "CLKSRC: 0x%08x\n", mmc_readl(host, CLKSRC));
51 mmc_info(host, "TMOUT: 0x%08x\n", mmc_readl(host, TMOUT));
52 mmc_info(host, "CTYPE: 0x%08x\n", mmc_readl(host, CTYPE));
53 mmc_info(host, "BLKSIZ: 0x%08x\n", mmc_readl(host, BLKSIZ));
54 mmc_info(host, "BYTCNT: 0x%08x\n", mmc_readl(host, BYTCNT));
55 mmc_info(host, "INTMASK: 0x%08x\n", mmc_readl(host, INTMASK));
56 mmc_info(host, "CMDARG: 0x%08x\n", mmc_readl(host, CMDARG));
57 mmc_info(host, "CMD: 0x%08x\n", mmc_readl(host, CMD));
58 mmc_info(host, "RESP0: 0x%08x\n", mmc_readl(host, RESP0));
59 mmc_info(host, "RESP1: 0x%08x\n", mmc_readl(host, RESP1));
60 mmc_info(host, "RESP2: 0x%08x\n", mmc_readl(host, RESP2));
61 mmc_info(host, "RESP3: 0x%08x\n", mmc_readl(host, RESP3));
62 mmc_info(host, "MINTSTS: 0x%08x\n", mmc_readl(host, MINTSTS));
63 mmc_info(host, "STATUS: 0x%08x\n", mmc_readl(host, STATUS));
64 mmc_info(host, "FIFOTH: 0x%08x\n", mmc_readl(host, FIFOTH));
65 mmc_info(host, "CDETECT: 0x%08x\n", mmc_readl(host, CDETECT));
66 mmc_info(host, "WRTPRT: 0x%08x\n", mmc_readl(host, WRTPRT));
67 mmc_info(host, "TCBCNT: 0x%08x\n", mmc_readl(host, TCBCNT));
68 mmc_info(host, "TBBCNT: 0x%08x\n", mmc_readl(host, TBBCNT));
69 mmc_info(host, "DEBNCE: 0x%08x\n", mmc_readl(host, DEBNCE));
70 mmc_info(host, "USRID: 0x%08x\n", mmc_readl(host, USRID));
71 mmc_info(host, "VERID: 0x%08x\n", mmc_readl(host, VERID));
72 mmc_info(host, "UHS_REG: 0x%08x\n", mmc_readl(host, UHS_REG));
73 mmc_info(host, "RST_N: 0x%08x\n", mmc_readl(host, RST_N));
/* Fixed DMA channel for the eMMC interface. */
79 #define MMC_DMA_CHN DMACH_EMMC
/*
 * PL330 buffer-done callback (interrupt context).  Accumulates transferred
 * bytes; once the whole request (blocks * blksz) has moved, signals
 * XFER_COMPLETE and kicks the tasklet state machine.
 */
80 static void dma_callback_func(void *arg, int size, enum rk29_dma_buffresult result)
82 struct rk_mmc *host = (struct rk_mmc *)arg;
84 host->dma_xfer_size += size;
86 mmc_dbg(host, "total: %u, xfer: %u\n", host->data->blocks * host->data->blksz, host->dma_xfer_size);
87 if(host->dma_xfer_size == host->data->blocks * host->data->blksz){
88 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
89 tasklet_schedule(&host->tasklet);
/*
 * Claim and configure the DMA channel (unit 4 bytes, burst 16 — presumably
 * matching the FIFOTH programming in probe; confirm against the IP manual)
 * and install the completion callback.
 */
95 static int dma_init(struct rk_mmc *host)
99 res = rk29_dma_request(MMC_DMA_CHN, &mmc_client, NULL);
103 res = rk29_dma_config(MMC_DMA_CHN, 4, 16);
107 res = rk29_dma_set_buffdone_fn(MMC_DMA_CHN, dma_callback_func);
/* Release the DMA channel. */
111 static void dma_exit(struct rk_mmc *host)
113 rk29_dma_free(MMC_DMA_CHN, NULL);
/*
 * Start a DMA transfer for host->data: pick direction from the request
 * flags, map the scatterlist, point the channel at the data FIFO
 * (host->dma_addr), enqueue every sg segment, then start the channel.
 * Error paths (truncated here) appear to fall back via host->ops->stop().
 */
115 static int dma_start(struct rk_mmc *host)
117 int i, res, direction, sg_len;
118 enum rk29_dmasrc src;
119 struct mmc_data *data = host->data;
123 host->dma_xfer_size = 0;
125 if (data->flags & MMC_DATA_READ){
126 direction = DMA_FROM_DEVICE;
127 src = RK29_DMASRC_HW;
129 direction = DMA_TO_DEVICE;
130 src = RK29_DMASRC_MEM;
133 sg_len = rk_mmc_pre_dma_transfer(host, host->data, 0);
135 host->ops->stop(host);
138 res = rk29_dma_devconfig(MMC_DMA_CHN, src, host->dma_addr);
139 if(unlikely(res < 0))
142 for(i = 0; i < sg_len; i++){
143 res = rk29_dma_enqueue(MMC_DMA_CHN, host,
144 sg_dma_address(&data->sg[i]),
145 sg_dma_len(&data->sg[i]));
146 if(unlikely(res < 0))
149 res = rk29_dma_ctrl(MMC_DMA_CHN, RK29_DMAOP_START);
150 if(unlikely(res < 0))
/*
 * Stop a DMA transfer: disable+reset the controller's DMA interface,
 * then stop and flush the PL330 channel.
 */
155 static int dma_stop(struct rk_mmc *host)
160 /* Disable and reset the DMA interface */
161 temp = mmc_readl(host, CTRL);
162 temp &= ~MMC_CTRL_DMA_ENABLE;
163 temp |= MMC_CTRL_DMA_RESET;
164 mmc_writel(host, CTRL, temp);
166 res = rk29_dma_ctrl(MMC_DMA_CHN, RK29_DMAOP_STOP);
167 if(unlikely(res < 0))
170 rk29_dma_ctrl(MMC_DMA_CHN, RK29_DMAOP_FLUSH);
/* DMA backend vtable bound to host->ops in probe (init/start/stop/exit). */
174 struct rk_mmc_dma_ops dma_ops = {
181 #if defined(CONFIG_DEBUG_FS)
/*
 * debugfs "req" file: print the in-flight request (command, data, stop)
 * under host->lock for a consistent snapshot.
 */
182 static int rk_mmc_req_show(struct seq_file *s, void *v)
184 struct rk_mmc *host = s->private;
185 struct mmc_request *mrq;
186 struct mmc_command *cmd;
187 struct mmc_command *stop;
188 struct mmc_data *data;
190 /* Make sure we get a consistent snapshot */
191 spin_lock_bh(&host->lock);
201 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
202 cmd->opcode, cmd->arg, cmd->flags,
203 cmd->resp[0], cmd->resp[1], cmd->resp[2],
/* NOTE(review): resp[2] printed twice — the 4th field should be resp[3]. */
204 cmd->resp[2], cmd->error);
206 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
207 data->bytes_xfered, data->blocks,
208 data->blksz, data->flags, data->error);
211 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
212 stop->opcode, stop->arg, stop->flags,
213 stop->resp[0], stop->resp[1], stop->resp[2],
/* NOTE(review): same resp[2]/resp[3] duplication for the stop command. */
214 stop->resp[2], stop->error);
217 spin_unlock_bh(&host->lock);
/* debugfs open hook: bind rk_mmc_req_show to the seq_file. */
222 static int rk_mmc_req_open(struct inode *inode, struct file *file)
224 return single_open(file, rk_mmc_req_show, inode->i_private);
227 static const struct file_operations rk_mmc_req_fops = {
228 .owner = THIS_MODULE,
229 .open = rk_mmc_req_open,
232 .release = single_release,
/*
 * debugfs "regs" file: same register dump as rk_mmc_show_regs() but into a
 * seq_file instead of the kernel log.
 */
235 static int rk_mmc_regs_show(struct seq_file *s, void *v)
237 struct rk_mmc *host = s->private;
239 seq_printf(s, "CTRL: 0x%08x\n", mmc_readl(host, CTRL));
240 seq_printf(s, "PWREN: 0x%08x\n", mmc_readl(host, PWREN));
241 seq_printf(s, "CLKDIV: 0x%08x\n", mmc_readl(host, CLKDIV));
242 seq_printf(s, "CLKENA: 0x%08x\n", mmc_readl(host, CLKENA));
243 seq_printf(s, "CLKSRC: 0x%08x\n", mmc_readl(host, CLKSRC));
244 seq_printf(s, "TMOUT: 0x%08x\n", mmc_readl(host, TMOUT));
245 seq_printf(s, "CTYPE: 0x%08x\n", mmc_readl(host, CTYPE));
246 seq_printf(s, "BLKSIZ: 0x%08x\n", mmc_readl(host, BLKSIZ));
247 seq_printf(s, "BYTCNT: 0x%08x\n", mmc_readl(host, BYTCNT));
248 seq_printf(s, "INTMASK: 0x%08x\n", mmc_readl(host, INTMASK));
249 seq_printf(s, "CMDARG: 0x%08x\n", mmc_readl(host, CMDARG));
250 seq_printf(s, "CMD: 0x%08x\n", mmc_readl(host, CMD));
251 seq_printf(s, "RESP0: 0x%08x\n", mmc_readl(host, RESP0));
252 seq_printf(s, "RESP1: 0x%08x\n", mmc_readl(host, RESP1));
253 seq_printf(s, "RESP2: 0x%08x\n", mmc_readl(host, RESP2));
254 seq_printf(s, "RESP3: 0x%08x\n", mmc_readl(host, RESP3));
255 seq_printf(s, "MINTSTS: 0x%08x\n", mmc_readl(host, MINTSTS));
256 seq_printf(s, "STATUS: 0x%08x\n", mmc_readl(host, STATUS));
257 seq_printf(s, "FIFOTH: 0x%08x\n", mmc_readl(host, FIFOTH));
258 seq_printf(s, "CDETECT: 0x%08x\n", mmc_readl(host, CDETECT));
259 seq_printf(s, "WRTPRT: 0x%08x\n", mmc_readl(host, WRTPRT));
260 seq_printf(s, "TCBCNT: 0x%08x\n", mmc_readl(host, TCBCNT));
261 seq_printf(s, "TBBCNT: 0x%08x\n", mmc_readl(host, TBBCNT));
262 seq_printf(s, "DEBNCE: 0x%08x\n", mmc_readl(host, DEBNCE));
263 seq_printf(s, "USRID: 0x%08x\n", mmc_readl(host, USRID));
264 seq_printf(s, "VERID: 0x%08x\n", mmc_readl(host, VERID));
265 seq_printf(s, "UHS_REG: 0x%08x\n", mmc_readl(host, UHS_REG));
266 seq_printf(s, "RST_N: 0x%08x\n", mmc_readl(host, RST_N));
/* debugfs open hook: bind rk_mmc_regs_show to the seq_file. */
271 static int rk_mmc_regs_open(struct inode *inode, struct file *file)
273 return single_open(file, rk_mmc_regs_show, inode->i_private);
276 static const struct file_operations rk_mmc_regs_fops = {
277 .owner = THIS_MODULE,
278 .open = rk_mmc_regs_open,
281 .release = single_release,
/*
 * Create the debugfs tree under the mmc host's debugfs root: "regs" and
 * "req" files plus raw views of the state machine and event bitmasks.
 * Failure is non-fatal (only a dev_err on the error path).
 */
284 static void rk_mmc_init_debugfs(struct rk_mmc *host)
286 struct mmc_host *mmc = host->mmc;
290 root = mmc->debugfs_root;
294 node = debugfs_create_file("regs", S_IRUSR, root, host,
299 node = debugfs_create_file("req", S_IRUSR, root, host,
304 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
/* Casting the unsigned long event masks to u32* exposes only the low word. */
308 node = debugfs_create_x32("pending_events", S_IRUSR, root,
309 (u32 *)&host->pending_events);
313 node = debugfs_create_x32("completed_events", S_IRUSR, root,
314 (u32 *)&host->completed_events);
321 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
323 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the maximum possible data/response timeout. */
325 static void rk_mmc_set_timeout(struct rk_mmc *host)
327 /* timeout (maximum) */
328 mmc_writel(host, TMOUT, 0xffffffff);
/*
 * Reset controller, FIFO and DMA blocks and poll (up to 500 ms) for the
 * self-clearing reset bits.  Returns true on success (return statements are
 * truncated in this view), false after logging a timeout.
 */
331 static bool mci_wait_reset(struct rk_mmc *host)
333 unsigned long timeout = jiffies + msecs_to_jiffies(500);
336 mmc_writel(host, CTRL, (MMC_CTRL_RESET | MMC_CTRL_FIFO_RESET |
337 MMC_CTRL_DMA_RESET));
339 /* wait till resets clear */
341 ctrl = mmc_readl(host, CTRL);
342 if (!(ctrl & (MMC_CTRL_RESET | MMC_CTRL_FIFO_RESET |
343 MMC_CTRL_DMA_RESET)))
345 } while (time_before(jiffies, timeout));
347 mmc_err(host, "Timeout resetting block (ctrl %#x)\n", ctrl);
/*
 * Busy-wait (up to 500 ms) until STATUS shows neither the data path nor
 * the card as busy; logs but does not propagate a timeout.
 */
353 static void mmc_wait_data_idle(struct rk_mmc *host)
355 unsigned long timeout = jiffies + msecs_to_jiffies(500);
356 unsigned int status = 0;
358 while (time_before(jiffies, timeout)) {
359 status = mmc_readl(host, STATUS);
360 if (!(status & MMC_DATA_BUSY) && !(status & MMC_MC_BUSY))
363 mmc_err(host, "Timeout waiting for data idle (status 0x%x)\n", status);
/*
 * Translate an mmc_command into the controller's CMD register flags:
 * stop/wait-previous-data handling, response present/long/CRC bits, and
 * data-expected/stream/write bits when a data segment is attached.
 * Marks the command in-flight via cmd->error = -EINPROGRESS.
 */
366 static u32 rk_mmc_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
368 struct mmc_data *data;
370 cmd->error = -EINPROGRESS;
/* NOTE(review): comparing cmdr (the opcode) — assignment line is truncated here. */
374 if (cmdr == MMC_STOP_TRANSMISSION)
375 cmdr |= MMC_CMD_STOP;
377 cmdr |= MMC_CMD_PRV_DAT_WAIT;
379 if (cmd->flags & MMC_RSP_PRESENT) {
380 /* We expect a response, so set this bit */
381 cmdr |= MMC_CMD_RESP_EXP;
382 if (cmd->flags & MMC_RSP_136)
383 cmdr |= MMC_CMD_RESP_LONG;
386 if (cmd->flags & MMC_RSP_CRC)
387 cmdr |= MMC_CMD_RESP_CRC;
391 cmdr |= MMC_CMD_DAT_EXP;
392 if (data->flags & MMC_DATA_STREAM)
393 cmdr |= MMC_CMD_STRM_MODE;
394 if (data->flags & MMC_DATA_WRITE)
395 cmdr |= MMC_CMD_DAT_WR;
/*
 * Latch the argument and fire the command; MMC_CMD_START self-clears when
 * the controller accepts it.  HOLD_REG routing is always enabled.
 */
401 static void rk_mmc_start_command(struct rk_mmc *host,
402 struct mmc_command *cmd, u32 cmd_flags)
406 mmc_writel(host, CMDARG, cmd->arg);
408 mmc_writel(host, CMD, cmd_flags | MMC_CMD_START | MMC_USE_HOLD_REG);
/*
 * Issue an internally generated CMD12 (STOP_TRANSMISSION) when the request
 * itself carried no stop command (host->stop_ex path in the tasklet).
 */
410 static void send_stop_cmd_ex(struct rk_mmc *host)
412 struct mmc_command cmd;
415 host->stop.opcode = MMC_STOP_TRANSMISSION;
416 host->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
418 host->stop.data = NULL;
419 host->stop.mrq = NULL;
420 host->stop.retries = 0;
421 host->stop.error = 0;
422 cmdflags = rk_mmc_prepare_command(host->mmc, &host->stop);
425 mmc_dbg(host,"stop command ex: CMD%d, ARGR=0x%08x CMDR=0x%08x\n",
426 host->stop.opcode, host->stop.arg, cmdflags);
/*
 * NOTE(review): host->stop is populated and prepared above, yet the local,
 * apparently uninitialized 'cmd' is what gets issued — this looks like it
 * should be &host->stop (the tasklet later completes host->stop). Verify.
 */
427 rk_mmc_start_command(host, &cmd, cmdflags);
/* Issue the request-supplied stop command with the flags precomputed in
 * __rk_mmc_start_request() (host->stop_cmdr). */
430 static void send_stop_cmd(struct rk_mmc *host, struct mmc_data *data)
432 mmc_dbg(host,"stop command: CMD%d, ARGR=0x%08x CMDR=0x%08x\n",
433 data->stop->opcode, data->stop->arg, host->stop_cmdr);
434 rk_mmc_start_command(host, data->stop, host->stop_cmdr);
/*
 * Unmap the current request's scatterlist unless it was pre-mapped by the
 * pre_req hook (host_cookie != 0), in which case post_req owns the unmap.
 */
437 static void rk_mmc_dma_cleanup(struct rk_mmc *host)
439 struct mmc_data *data = host->data;
442 if (!data->host_cookie)
443 dma_unmap_sg(host->dev, data->sg, data->sg_len,
444 ((data->flags & MMC_DATA_WRITE)
445 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
448 /* DMA interface functions */
/*
 * Abort an in-flight DMA transfer (error paths): stop the channel, unmap
 * the buffers, and fake XFER_COMPLETE so the tasklet can advance.
 */
449 static void rk_mmc_stop_dma(struct rk_mmc *host)
452 mmc_dbg(host, "stop dma\n");
453 host->ops->stop(host);
454 rk_mmc_dma_cleanup(host);
456 /* Data transfer was stopped by the interrupt handler */
457 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Hand a data segment to the DMA backend: enable the controller's DMA
 * interface, mask the PIO RX/TX IRQs, then start the channel.  A non-zero
 * return makes the caller fall back to PIO.
 */
461 static int rk_mmc_submit_data_dma(struct rk_mmc *host, struct mmc_data *data)
466 /* Enable the DMA interface */
467 temp = mmc_readl(host, CTRL);
468 temp |= MMC_CTRL_DMA_ENABLE;
469 mmc_writel(host, CTRL, temp);
471 /* Disable RX/TX IRQs, let DMA handle it */
472 temp = mmc_readl(host, INTMASK);
473 temp &= ~(MMC_INT_RXDR | MMC_INT_TXDR);
474 mmc_writel(host, INTMASK, temp);
476 res = host->ops->start(host);
/*
 * Set up the data phase of a request.  Tries DMA first; on failure falls
 * back to PIO: resets the pio cursor, records the direction, unmasks the
 * FIFO RX/TX interrupts and disables the controller's DMA interface.
 */
480 static void rk_mmc_submit_data(struct rk_mmc *host, struct mmc_data *data)
484 data->error = -EINPROGRESS;
490 if (rk_mmc_submit_data_dma(host, data)) {
491 mmc_dbg(host, "FIFO transfer\n");
493 host->pio_offset = 0;
494 if (data->flags & MMC_DATA_READ)
495 host->dir_status = MMC_RECV_DATA;
497 host->dir_status = MMC_SEND_DATA;
/* Clear stale FIFO IRQs before unmasking them for PIO. */
499 mmc_writel(host, RINTSTS, MMC_INT_TXDR | MMC_INT_RXDR);
500 temp = mmc_readl(host, INTMASK);
501 temp |= MMC_INT_TXDR | MMC_INT_RXDR;
502 mmc_writel(host, INTMASK, temp);
504 temp = mmc_readl(host, CTRL);
505 temp &= ~MMC_CTRL_DMA_ENABLE;
506 mmc_writel(host, CTRL, temp);
509 mmc_dbg(host, "DMA transfer\n");
/*
 * Kick off one command of the current mrq: reset per-request bookkeeping,
 * program byte/block counts for a data phase, build the CMD flags (adding
 * the 80-cycle INIT sequence on the first command after power-up), submit
 * the data, and start the command.  Also precomputes stop_cmdr if the
 * request carries a stop command.
 */
514 static void __rk_mmc_start_request(struct rk_mmc *host, struct mmc_command *cmd)
516 struct mmc_request *mrq = host->mrq;
517 struct mmc_data *data;
522 host->pending_events = 0;
523 host->completed_events = 0;
524 host->data_status = 0;
528 rk_mmc_set_timeout(host);
529 mmc_writel(host, BYTCNT, data->blksz*data->blocks);
530 mmc_writel(host, BLKSIZ, data->blksz);
533 cmdflags = rk_mmc_prepare_command(host->mmc, cmd);
535 /* this is the first command, send the initialization clock */
536 if (test_and_clear_bit(MMC_NEED_INIT, &host->flags))
537 cmdflags |= MMC_CMD_INIT;
540 cmdflags |= MMC_CMD_INIT;
543 rk_mmc_submit_data(host, data);
/* Bus-test commands (CMD14/CMD19) are tracked — presumably sets
 * host->bus_test; the assignment line is truncated in this view. */
545 if(cmd->opcode == MMC_BUS_TEST_R || cmd->opcode == MMC_BUS_TEST_W)
550 mmc_dbg(host,"start command: CMD%d, ARGR=0x%08x CMDR=0x%08x\n",
551 cmd->opcode, cmd->arg, cmdflags);
552 rk_mmc_start_command(host, cmd, cmdflags);
555 host->stop_cmdr = rk_mmc_prepare_command(host->mmc, mrq->stop);
/* Begin a request, honoring SBC (CMD23) ordering: send sbc first if present. */
558 static void rk_mmc_start_request(struct rk_mmc *host)
560 struct mmc_request *mrq = host->mrq;
561 struct mmc_command *cmd;
563 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
564 __rk_mmc_start_request(host, cmd);
/*
 * mmc_host_ops.request: entry from the MMC core.  Only one request is
 * in flight at a time (WARN if not idle or already shut down); state is
 * advanced under host->lock.
 */
566 static void rk_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
568 struct rk_mmc *host = mmc_priv(mmc);
571 WARN_ON(host->state != STATE_IDLE);
572 WARN_ON(host->shutdown == 1);
574 spin_lock_bh(&host->lock);
575 host->state = STATE_SENDING_CMD;
577 rk_mmc_start_request(host);
578 spin_unlock_bh(&host->lock);
/*
 * Synchronously issue an internal (non-card-visible) controller command,
 * e.g. CMD_UPD_CLK during clock reprogramming, polling up to 500 ms for
 * the START bit to self-clear.  Timeout is logged, not propagated.
 */
581 static void mci_send_cmd(struct rk_mmc *host, u32 cmd, u32 arg)
583 unsigned long timeout = jiffies + msecs_to_jiffies(500);
584 unsigned int cmd_status = 0;
586 mmc_writel(host, CMDARG, arg);
587 mmc_writel(host, CMD, MMC_CMD_START | cmd);
589 while (time_before(jiffies, timeout)) {
590 cmd_status = mmc_readl(host, CMD);
591 if (!(cmd_status & MMC_CMD_START))
594 mmc_err(host, "Timeout sending command (cmd %#x arg %#x status %#x)\n",
595 cmd, arg, cmd_status);
/*
 * mmc_host_ops.set_ios: apply bus width, DDR timing, clock rate and power
 * state.  Clock changes follow the controller's documented sequence:
 * disable clock, update divider via CMD_UPD_CLK, re-enable (low-power
 * clock gating on), each step latched with an internal UPD_CLK command.
 */
599 static void rk_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
601 struct rk_mmc *host = mmc_priv(mmc);
604 /* set default 1 bit mode */
605 host->ctype = MMC_CTYPE_1BIT;
607 switch (ios->bus_width) {
608 case MMC_BUS_WIDTH_1:
609 host->ctype = MMC_CTYPE_1BIT;
611 case MMC_BUS_WIDTH_4:
612 host->ctype = MMC_CTYPE_4BIT;
614 case MMC_BUS_WIDTH_8:
615 host->ctype = MMC_CTYPE_8BIT;
619 if (ios->timing == MMC_TIMING_UHS_DDR50){
620 regs = mmc_readl(host, UHS_REG);
621 regs |= MMC_UHS_DDR_MODE;
622 mmc_writel(host, UHS_REG, regs);
/* CLKDIV divides by 2*div; round the divider up when bus_hz doesn't
 * divide evenly so the card clock never exceeds the requested rate. */
624 if (ios->clock && ios->clock != host->curr_clock) {
625 if (host->bus_hz % ios->clock)
626 div = ((host->bus_hz / ios->clock) >> 1) + 1;
628 div = (host->bus_hz / ios->clock) >> 1;
630 mmc_dbg(host, "Bus clock: %dHz, req: %dHz, actual: %dHz, div: %d\n",
631 host->bus_hz, ios->clock,
632 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
635 mmc_writel(host, CLKENA, 0);
636 mmc_writel(host, CLKSRC, 0);
640 MMC_CMD_UPD_CLK | MMC_CMD_PRV_DAT_WAIT, 0);
642 /* set clock to desired speed */
643 mmc_writel(host, CLKDIV, div);
647 MMC_CMD_UPD_CLK | MMC_CMD_PRV_DAT_WAIT, 0);
650 mmc_writel(host, CLKENA, MMC_CLKEN_ENABLE | MMC_CLKEN_LOW_PWR);
654 MMC_CMD_UPD_CLK | MMC_CMD_PRV_DAT_WAIT, 0);
656 host->curr_clock = ios->clock;
659 switch (ios->power_mode) {
661 mmc_dbg(host, "power up\n");
662 mmc_writel(host, PWREN, MMC_PWREN_ON);
/* Pulse the card hardware reset line and flag the INIT sequence for
 * the first subsequent command. */
664 mmc_writel(host, RST_N, 0);
666 mmc_writel(host, RST_N, MMC_CARD_RESET);
668 set_bit(MMC_NEED_INIT, &host->flags);
671 mmc_dbg(host, "power off\n");
672 mmc_writel(host, PWREN, 0);
676 mmc_dbg(host, "ctype: 0x%x\n", host->ctype);
677 mmc_writel(host, CTYPE, host->ctype);
/* Write-protect query — stubbed out (eMMC is never write-protected here). */
680 static int rk_mmc_get_ro(struct mmc_host *mmc)
682 //struct rk_mmc *host = mmc_priv(mmc);
/* Card-detect query — stubbed out (eMMC is non-removable). */
687 static int rk_mmc_get_cd(struct mmc_host *mmc)
689 //struct rk_mmc *host = mmc_priv(mmc);
/* Map request direction to the DMA API direction. */
694 static int rk_mmc_get_dma_dir(struct mmc_data *data)
696 if (data->flags & MMC_DATA_WRITE)
697 return DMA_TO_DEVICE;
699 return DMA_FROM_DEVICE;
/*
 * Map the request's scatterlist for DMA, or return the count cached by the
 * pre_req hook.  Refuses DMA (error return, truncated here) for transfers
 * below MMC_DMA_THRESHOLD or with non-word-aligned segments, forcing PIO.
 */
702 static int rk_mmc_pre_dma_transfer(struct rk_mmc *host,
703 struct mmc_data *data,
706 struct scatterlist *sg;
707 unsigned int i, sg_len;
709 if (!next && data->host_cookie)
710 return data->host_cookie;
713 * We don't do DMA on "complex" transfers, i.e. with
714 * non-word-aligned buffers or lengths. Also, we don't bother
715 * with all the DMA setup overhead for short transfers.
717 if (data->blocks * data->blksz < MMC_DMA_THRESHOLD)
722 for_each_sg(data->sg, sg, data->sg_len, i) {
723 if (sg->offset & 3 || sg->length & 3)
727 sg_len = dma_map_sg(host->dev,
730 rk_mmc_get_dma_dir(data));
/* Cache the mapping for the asynchronous pre_req/post_req protocol. */
734 data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: pre-map the next request's buffers so the mapping
 * cost overlaps with the current transfer; a stale cookie is cleared first.
 */
738 static void rk_mmc_pre_req(struct mmc_host *mmc,
739 struct mmc_request *mrq,
742 struct rk_mmc *host = mmc_priv(mmc);
743 struct mmc_data *data = mrq->data;
747 if (data->host_cookie) {
748 data->host_cookie = 0;
751 if (rk_mmc_pre_dma_transfer(host, mrq->data, 1) < 0)
752 data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: unmap buffers that pre_req mapped (cookie set)
 * and clear the cookie.
 */
755 static void rk_mmc_post_req(struct mmc_host *mmc,
756 struct mmc_request *mrq,
759 struct rk_mmc *host = mmc_priv(mmc);
760 struct mmc_data *data = mrq->data;
764 if (data->host_cookie)
765 dma_unmap_sg(host->dev,
768 rk_mmc_get_dma_dir(data));
769 data->host_cookie = 0;
/* Host operations exported to the MMC core. */
772 static const struct mmc_host_ops rk_mmc_ops = {
773 .request = rk_mmc_request,
774 .set_ios = rk_mmc_set_ios,
775 .get_ro = rk_mmc_get_ro,
776 .get_cd = rk_mmc_get_cd,
777 .pre_req = rk_mmc_pre_req,
778 .post_req = rk_mmc_post_req,
/*
 * Finish a request: return to IDLE, drop host->lock while waiting for the
 * data path to go idle and while calling back into the MMC core
 * (mmc_request_done may re-enter ->request), then retake the lock.
 * After a successful bus test, reports the negotiated width and clock.
 */
781 static void rk_mmc_request_end(struct rk_mmc *host, struct mmc_request *mrq)
782 __releases(&host->lock)
783 __acquires(&host->lock)
786 WARN_ON(host->cmd || host->data);
788 host->state = STATE_IDLE;
789 spin_unlock(&host->lock);
790 mmc_wait_data_idle(host);
791 mmc_dbg(host, "mmc request done, RINSTS: 0x%x, pending_events: %lu\n",
792 mmc_readl(host, RINTSTS), host->pending_events);
793 if(host->bus_test && mrq->data && mrq->data->error == 0){
796 ctype = mmc_readl(host, CTYPE);
797 div = mmc_readl(host, CLKDIV);
/* NOTE(review): actual card clock is bus_hz/(2*div) per set_ios; the
 * bus_hz/(div+1) reported here may overstate it — verify. */
799 if(ctype & MMC_CTYPE_8BIT)
800 mmc_info(host, "bus width: 8 bit, clock: %uHz\n",
801 host->bus_hz/(div+1));
802 else if(ctype & MMC_CTYPE_4BIT)
803 mmc_info(host, "bus width: 4 bit, clock: %uHz\n",
804 host->bus_hz/(div+1));
806 mmc_info(host, "bus width: 1 bit, clock: %uHz\n",
807 host->bus_hz/(div+1));
809 mmc_request_done(host->mmc, mrq);
810 spin_lock(&host->lock);
/*
 * Complete a command: read the response registers (RESP0..3 reversed into
 * resp[3..0] for 136-bit responses) and translate the latched interrupt
 * status into cmd->error (-ETIMEDOUT / -EILSEQ / response error).  On a
 * data-command error, the in-flight DMA is torn down.
 */
813 static void rk_mmc_command_complete(struct rk_mmc *host, struct mmc_command *cmd)
815 u32 status = host->cmd_status;
817 host->cmd_status = 0;
819 /* Read the response from the card (up to 16 bytes) */
820 if (cmd->flags & MMC_RSP_PRESENT) {
821 if (cmd->flags & MMC_RSP_136) {
822 cmd->resp[3] = mmc_readl(host, RESP0);
823 cmd->resp[2] = mmc_readl(host, RESP1);
824 cmd->resp[1] = mmc_readl(host, RESP2);
825 cmd->resp[0] = mmc_readl(host, RESP3);
827 cmd->resp[0] = mmc_readl(host, RESP0);
834 if (status & MMC_INT_RTO){
835 mmc_dbg(host, "CMD%d response timeout\n", cmd->opcode);
836 cmd->error = -ETIMEDOUT;
838 else if ((cmd->flags & MMC_RSP_CRC) && (status & MMC_INT_RCRC)){
839 mmc_dbg(host, "CMD%d crc error\n", cmd->opcode);
840 cmd->error = -EILSEQ;
842 else if (status & MMC_INT_RESP_ERR){
843 mmc_dbg(host, "CMD%d response error\n", cmd->opcode);
850 /* newer ip versions need a delay between retries */
855 rk_mmc_stop_dma(host);
/*
 * Bottom-half state machine.  Driven by event bits set in the ISR / DMA
 * callback (pending_events), it walks the request through
 * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP, with
 * DATA_ERROR as the error detour, looping until the state stabilizes.
 * Runs entirely under host->lock.
 */
860 static void rk_mmc_tasklet_func(unsigned long priv)
862 struct rk_mmc *host = (struct rk_mmc *)priv;
863 struct mmc_data *data;
864 struct mmc_command *cmd;
865 enum rk_mmc_state state;
866 enum rk_mmc_state prev_state;
869 spin_lock(&host->lock);
881 case STATE_SENDING_CMD:
882 mmc_dbg(host, "sending cmd, pending_events: %lx\n", host->pending_events);
883 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
884 &host->pending_events))
889 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
890 rk_mmc_command_complete(host, cmd);
/* SBC (CMD23) finished OK: chain straight into the real command. */
891 if (cmd == host->mrq->sbc && !cmd->error) {
892 prev_state = state = STATE_SENDING_CMD;
893 __rk_mmc_start_request(host, host->mrq->cmd);
897 if (!host->mrq->data || cmd->error) {
898 rk_mmc_request_end(host, host->mrq);
902 prev_state = state = STATE_SENDING_DATA;
905 case STATE_SENDING_DATA:
906 mmc_dbg(host, "sending data, pending_events: %lx\n", host->pending_events);
907 if (test_and_clear_bit(EVENT_DATA_ERROR,
908 &host->pending_events)) {
909 rk_mmc_stop_dma(host);
911 send_stop_cmd(host, data);
913 send_stop_cmd_ex(host);
914 state = STATE_DATA_ERROR;
918 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
919 &host->pending_events))
922 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
923 prev_state = state = STATE_DATA_BUSY;
926 case STATE_DATA_BUSY:
927 mmc_dbg(host, "data busy, pending_events: %lx, data_status: %08x, status: %08x\n",
928 host->pending_events, host->data_status, mmc_readl(host, STATUS));
929 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
930 &host->pending_events)){
934 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
935 status = host->data_status;
937 if (status & MMC_DATA_ERROR_FLAGS) {
938 if (status & MMC_INT_DTO) {
940 mmc_err(host, "data timeout error "
941 "(data_status=%08x)\n", status);
942 data->error = -ETIMEDOUT;
943 } else if (status & MMC_INT_DCRC) {
945 mmc_err(host, "data CRC error "
946 "(data_status=%08x)\n", status);
947 data->error = -EILSEQ;
950 mmc_err(host, "data FIFO error "
951 "(data_status=%08x)\n", status);
955 data->bytes_xfered = data->blocks * data->blksz;
959 if (!data->stop && !host->stop_ex) {
960 rk_mmc_request_end(host, host->mrq);
/* With SBC the card stops itself — no CMD12 needed on success. */
964 if (host->mrq->sbc && !data->error) {
965 data->stop->error = 0;
966 rk_mmc_request_end(host, host->mrq);
970 prev_state = state = STATE_SENDING_STOP;
971 if (!data->error && data->stop)
972 send_stop_cmd(host, data);
975 case STATE_SENDING_STOP:
976 mmc_dbg(host, "sending stop, pending_events: %lx\n", host->pending_events);
977 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
978 &host->pending_events))
/* Internally generated stop (stop_ex) vs. request-supplied stop. */
984 rk_mmc_command_complete(host, &host->stop);
987 rk_mmc_command_complete(host, host->mrq->stop);
988 rk_mmc_request_end(host, host->mrq);
991 case STATE_DATA_ERROR:
992 mmc_dbg(host, "data error, pending_events: %lx\n", host->pending_events);
993 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
994 &host->pending_events))
997 state = STATE_DATA_BUSY;
1000 } while (state != prev_state);
1002 host->state = state;
1004 spin_unlock(&host->lock);
/*
 * PIO write helper: push cnt bytes (word-aligned, multiple of 4 — both
 * asserted) from buf into the data FIFO.
 */
1009 static void rk_mmc_push_data(struct rk_mmc *host, void *buf, int cnt)
1011 u32 *pdata = (u32 *)buf;
1013 WARN_ON(cnt % 4 != 0);
1014 WARN_ON((unsigned long)pdata & 0x3);
1018 mmc_writel(host, DATA, *pdata++);
/*
 * PIO read helper: pull cnt bytes (word-aligned, multiple of 4) from the
 * data FIFO into buf.
 */
1023 static void rk_mmc_pull_data(struct rk_mmc *host, void *buf, int cnt)
1025 u32 *pdata = (u32 *)buf;
1027 WARN_ON(cnt % 4 != 0);
1028 WARN_ON((unsigned long)pdata & 0x3);
1032 *pdata++ = mmc_readl(host, DATA);
/*
 * PIO receive path, called from the ISR on RXDR / DATA_OVER.  Drains the
 * FIFO (fill count from STATUS, in words) into the scatterlist, flushing
 * the dcache and advancing host->sg at segment boundaries, and splitting a
 * FIFO burst that straddles two segments.  Loops while RXDR stays set;
 * bails to the tasklet on any latched data error.
 */
1037 static void rk_mmc_read_data_pio(struct rk_mmc *host)
1039 struct scatterlist *sg = host->sg;
1040 void *buf = sg_virt(sg);
1041 unsigned int offset = host->pio_offset;
1042 struct mmc_data *data = host->data;
1044 unsigned int nbytes = 0, len;
1046 mmc_dbg(host, "read data pio\n");
1049 len = MMC_GET_FCNT(mmc_readl(host, STATUS)) << 2;
1050 if (offset + len <= sg->length) {
1051 rk_mmc_pull_data(host, (void *)(buf + offset), len);
1056 if (offset == sg->length) {
1057 flush_dcache_page(sg_page(sg));
1058 host->sg = sg = sg_next(sg);
/* Burst crosses a segment boundary: finish this segment, then put
 * the remainder at the start of the next one. */
1066 unsigned int remaining = sg->length - offset;
1067 rk_mmc_pull_data(host, (void *)(buf + offset),
1069 nbytes += remaining;
1071 flush_dcache_page(sg_page(sg));
1072 host->sg = sg = sg_next(sg);
1076 offset = len - remaining;
1078 rk_mmc_pull_data(host, buf, offset);
1082 status = mmc_readl(host, MINTSTS);
1083 mmc_writel(host, RINTSTS, MMC_INT_RXDR);
1084 if (status & MMC_DATA_ERROR_FLAGS) {
1085 host->data_status = status;
1086 data->bytes_xfered += nbytes;
1088 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1090 tasklet_schedule(&host->tasklet);
1093 } while (status & MMC_INT_RXDR); /*if the RXDR is ready read again*/
/* NOTE(review): this FCNT read is dead — 'len' is not used afterwards. */
1094 len = MMC_GET_FCNT(mmc_readl(host, STATUS));
1095 host->pio_offset = offset;
1096 data->bytes_xfered += nbytes;
/* Scatterlist exhausted: account the bytes and mark the transfer done. */
1100 data->bytes_xfered += nbytes;
1101 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO transmit path, called from the ISR on TXDR.  Mirrors
 * rk_mmc_read_data_pio(): fills the FIFO's free space (capacity minus
 * the current fill count) from the scatterlist, handling segment
 * boundaries, looping while TXDR stays set, and bailing to the tasklet
 * on a latched data error.
 */
1104 static void rk_mmc_write_data_pio(struct rk_mmc *host)
1106 struct scatterlist *sg = host->sg;
1107 void *buf = sg_virt(sg);
1108 unsigned int offset = host->pio_offset;
1109 struct mmc_data *data = host->data;
1111 unsigned int nbytes = 0, len;
1113 mmc_dbg(host, "write data pio\n");
1116 (MMC_GET_FCNT(mmc_readl(host, STATUS)) << 2);
1117 if (offset + len <= sg->length) {
1118 rk_mmc_push_data(host, (void *)(buf + offset), len);
1122 if (offset == sg->length) {
1123 host->sg = sg = sg_next(sg);
/* Burst crosses a segment boundary: finish this segment first. */
1131 unsigned int remaining = sg->length - offset;
1133 rk_mmc_push_data(host, (void *)(buf + offset),
1135 nbytes += remaining;
1137 host->sg = sg = sg_next(sg);
1141 offset = len - remaining;
1143 rk_mmc_push_data(host, (void *)buf, offset);
1147 status = mmc_readl(host, MINTSTS);
1148 mmc_writel(host, RINTSTS, MMC_INT_TXDR);
1149 if (status & MMC_DATA_ERROR_FLAGS) {
1150 host->data_status = status;
1151 data->bytes_xfered += nbytes;
1153 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1155 tasklet_schedule(&host->tasklet);
1158 } while (status & MMC_INT_TXDR); /* if TXDR write again */
1160 host->pio_offset = offset;
1161 data->bytes_xfered += nbytes;
/* Scatterlist exhausted: account the bytes and mark the transfer done. */
1166 data->bytes_xfered += nbytes;
1167 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Latch the first command status seen (don't overwrite an earlier error)
 * and hand CMD_COMPLETE to the tasklet.
 */
1170 static void rk_mmc_cmd_interrupt(struct rk_mmc *host, u32 status)
1172 if (!host->cmd_status)
1173 host->cmd_status = status;
1175 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1176 tasklet_schedule(&host->tasklet);
/*
 * Top-half IRQ handler.  Reads the raw (RINTSTS) and masked (MINTSTS)
 * status, acknowledges each cause it handles by writing it back to
 * RINTSTS, latches status for the tasklet, and services PIO FIFO
 * thresholds inline.  Repeats up to 5 passes to catch back-to-back events.
 */
1179 static irqreturn_t rk_mmc_interrupt(int irq, void *dev_id)
1181 struct rk_mmc *host = dev_id;
1182 u32 status, pending;
1183 unsigned int pass_count = 0;
1186 status = mmc_readl(host, RINTSTS);
1187 pending = mmc_readl(host, MINTSTS); /* read-only mask reg */
1188 mmc_dbg(host, "RINSTS: 0x%x, MINTSTS: 0x%x\n", status, pending);
1193 if (pending & MMC_CMD_ERROR_FLAGS) {
1194 mmc_writel(host, RINTSTS, MMC_CMD_ERROR_FLAGS);
1195 host->cmd_status = status;
1196 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1197 tasklet_schedule(&host->tasklet);
1200 if (pending & MMC_DATA_ERROR_FLAGS) {
1201 /* if there is an error report DATA_ERROR */
1202 mmc_writel(host, RINTSTS, MMC_DATA_ERROR_FLAGS);
1203 host->data_status = status;
1204 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1205 tasklet_schedule(&host->tasklet);
1208 if (pending & MMC_INT_DATA_OVER) {
1209 mmc_dbg(host, "data over int\n");
1210 mmc_writel(host, RINTSTS, MMC_INT_DATA_OVER);
1211 if (!host->data_status)
1212 host->data_status = status;
/* On a read, drain whatever is left in the FIFO before completing. */
1213 if (host->dir_status == MMC_RECV_DATA) {
1214 if (host->sg != NULL)
1215 rk_mmc_read_data_pio(host);
1217 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1218 tasklet_schedule(&host->tasklet);
1221 if (pending & MMC_INT_RXDR) {
1222 mmc_writel(host, RINTSTS, MMC_INT_RXDR);
1224 rk_mmc_read_data_pio(host);
1227 if (pending & MMC_INT_TXDR) {
1228 mmc_writel(host, RINTSTS, MMC_INT_TXDR);
1230 rk_mmc_write_data_pio(host);
1233 if (pending & MMC_INT_CMD_DONE) {
1234 mmc_writel(host, RINTSTS, MMC_INT_CMD_DONE);
1235 rk_mmc_cmd_interrupt(host, status);
1237 } while (pass_count++ < 5);
/* GRF_SOC_CON0 bit selecting eMMC as boot/internal flash (sic: "FLAHS"). */
1242 #define EMMC_FLAHS_SEL (1<<11)
/*
 * Detect whether internal storage is eMMC: on RK3026 by checking the eMMC
 * iomux state, otherwise by the GRF flash-select strap bit.
 */
1243 static int internal_storage_is_emmc(void)
1245 #ifdef CONFIG_ARCH_RK3026
1246 if((iomux_is_set(EMMC_CLKOUT) == 1) &&
1247 (iomux_is_set(EMMC_CMD) == 1) &&
1248 (iomux_is_set(EMMC_D0) == 1))
1251 if(readl_relaxed(RK30_GRF_BASE + GRF_SOC_CON0) & EMMC_FLAHS_SEL)
/* Route the eMMC pins (clock, cmd, reset; plus power-enable on RK3026). */
1256 static void rk_mmc_set_iomux(void)
1258 iomux_set(EMMC_CLKOUT);
1259 iomux_set(EMMC_CMD);
1260 iomux_set(EMMC_RSTNOUT);
1261 #ifdef CONFIG_ARCH_RK3026
1262 iomux_set(EMMC_PWREN);
/*
 * Platform probe: verify the board really boots from eMMC, allocate the
 * mmc_host, claim IRQ/clocks/MMIO, initialize the DMA backend, reset and
 * program the controller (interrupts, FIFO thresholds, clock gating),
 * advertise capabilities, and register with the MMC core.
 */
1274 static int rk_mmc_probe(struct platform_device *pdev)
1276 struct rk_mmc *host;
1277 struct mmc_host *mmc;
1278 struct resource *regs;
1281 if(!internal_storage_is_emmc()){
1282 dev_err(&pdev->dev, "internal_storage is NOT emmc\n");
1288 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1292 mmc = mmc_alloc_host(sizeof(struct rk_mmc), &pdev->dev);
1296 host = mmc_priv(mmc);
1299 mmc->ops = &rk_mmc_ops;
1302 host->irq = platform_get_irq(pdev, 0);
1306 host->dev = &pdev->dev;
1307 host->ops = &dma_ops;
1308 host->state = STATE_IDLE;
1311 host->clk = clk_get(&pdev->dev, "emmc");
1314 clk_set_rate(host->clk, MMC_BUS_CLOCK);
1315 host->bus_hz = clk_get_rate(host->clk);
1317 clk_enable(host->clk);
/* NOTE(review): the clk reference from this clk_get() is leaked. */
1318 clk_enable(clk_get(&pdev->dev, "hclk_emmc"));
1320 spin_lock_init(&host->lock);
1322 host->regs = ioremap(regs->start, regs->end - regs->start + 1);
/* Physical address of the data FIFO, used by rk29_dma_devconfig(). */
1326 host->dma_addr = regs->start + MMC_DATA;
1328 res = host->ops->init(host);
1332 /* Reset all blocks */
1333 if (!mci_wait_reset(host)) {
1338 /* Clear the interrupts for the host controller */
1339 mmc_writel(host, RINTSTS, 0xFFFFFFFF);
1340 mmc_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
1342 /* Put in max timeout */
1343 mmc_writel(host, TMOUT, 0xFFFFFFFF);
/* RX watermark = FIFO/2 - 1, TX watermark = FIFO/2, burst size code 0x3. */
1344 mmc_writel(host, FIFOTH,
1345 (0x3 << 28) | ((FIFO_DETH/2 - 1) << 16) | ((FIFO_DETH/2) << 0));
1346 /* disable clock to CIU */
1347 mmc_writel(host, CLKENA, 0);
1348 mmc_writel(host, CLKSRC, 0);
1349 tasklet_init(&host->tasklet, rk_mmc_tasklet_func, (unsigned long)host);
1351 res = request_irq(host->irq, rk_mmc_interrupt, 0, "emmc", host);
1355 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
1356 mmc->f_max = host->bus_hz/2;
1358 mmc->ocr_avail = MMC_VDD_165_195| MMC_VDD_29_30 | MMC_VDD_30_31 |
1359 MMC_VDD_31_32 | MMC_VDD_32_33 | MMC_VDD_33_34;
1361 mmc->caps = MMC_CAP_4_BIT_DATA| MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
1362 MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50 |
1363 MMC_CAP_BUS_WIDTH_TEST |
1366 /*MMC_CAP_WAIT_WHILE_BUSY |*/
1367 MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
1369 //mmc->caps2 = MMC_CAP2_CACHE_CTRL;
1372 mmc->max_blk_size = 512;
1373 mmc->max_blk_count = 4096;
1374 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1375 mmc->max_seg_size = mmc->max_req_size;
1377 if(grf_get_io_power_domain_voltage(IO_PD_FLASH) == IO_PD_VOLTAGE_1_8V)
1378 mmc_writel(host, UHS_REG, MMC_UHS_VOLT_18);
1380 mmc_writel(host, RINTSTS, 0xFFFFFFFF);
1381 mmc_writel(host, INTMASK, MMC_INT_CMD_DONE | MMC_INT_DATA_OVER |
1382 MMC_INT_TXDR | MMC_INT_RXDR | MMC_ERROR_FLAGS);
1383 mmc_writel(host, CTRL, MMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
1384 platform_set_drvdata(pdev, host);
1388 #if defined(CONFIG_DEBUG_FS)
1389 rk_mmc_init_debugfs(host);
1392 mmc_info(host, "MMC controller initialized, bus_hz: %uHz\n", host->bus_hz);
1396 host->ops->exit(host);
1398 iounmap(host->regs);
1400 clk_disable(host->clk);
/* NOTE(review): enables "hclk_emmc" above but disables "hclk_mmc" here —
 * the names should match; verify against the clock tree. */
1401 clk_disable(clk_get(&pdev->dev, "hclk_mmc"));
/*
 * Shutdown path (also reused by remove): send the card to pre-idle
 * (CMD0 arg 0xF0F0F0F0), unregister from the MMC core, tear down DMA and
 * the IRQ, quiesce and power down the controller, and release clocks/MMIO.
 */
1408 static void rk_mmc_shutdown(struct platform_device *pdev)
1410 struct rk_mmc *host = platform_get_drvdata(pdev);
1411 //struct mmc_host *mmc = host->mmc;
1413 mmc_info(host, "shutdown\n");
1416 //card go pre-idle state
1417 mmc_writel(host, CMDARG, 0xF0F0F0F0);
1418 mmc_writel(host, CMD, 0 | MMC_CMD_INIT | MMC_CMD_START | MMC_USE_HOLD_REG);
1422 mmc_remove_host(host->mmc);
1423 mmc_info(host, "mmc removed\n");
1424 platform_set_drvdata(pdev, NULL);
1426 host->ops->exit(host);
1428 free_irq(host->irq, host);
1429 mmc_writel(host, RINTSTS, 0xFFFFFFFF);
1430 mmc_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
1431 mmc_writel(host, PWREN, 0);
1432 mmc_writel(host, RST_N, 0);
1434 /* disable clock to CIU */
1435 mmc_writel(host, CLKENA, 0);
1436 mmc_writel(host, CLKSRC, 0);
1437 clk_disable(host->clk);
/* NOTE(review): probe enables "hclk_emmc" but this disables "hclk_mmc" —
 * name mismatch; verify against the clock tree. */
1438 clk_disable(clk_get(&pdev->dev, "hclk_mmc"));
1441 iounmap(host->regs);
/* NOTE(review): these writes occur after iounmap() in this view — verify
 * ordering in the full source. */
1445 mmc_writel(host, PWREN, 0);
1446 mmc_writel(host, RST_N, 0);
/* Remove is just shutdown; __exit pairs with __exit_p in the driver struct. */
1450 static int __exit rk_mmc_remove(struct platform_device *pdev)
1452 rk_mmc_shutdown(pdev);
/* Legacy (pre-dev_pm_ops) suspend hook: delegate to the MMC core. */
1456 static int rk_mmc_suspend(struct platform_device *pdev, pm_message_t mesg)
1459 struct rk_mmc *host = platform_get_drvdata(pdev);
1461 res = mmc_suspend_host(host->mmc);
/*
 * Legacy resume hook: re-reset the controller and reprogram the registers
 * lost across suspend (FIFO thresholds, UHS, clock gating, interrupt
 * unmask), then resume the MMC core.
 */
1465 static int rk_mmc_resume(struct platform_device *pdev)
1469 struct rk_mmc *host = platform_get_drvdata(pdev);
1471 if (!mci_wait_reset(host)) {
1475 mmc_writel(host, FIFOTH,
1476 (0x3 << 28) | ((FIFO_DETH/2 - 1) << 16) | ((FIFO_DETH/2) << 0));
/* NOTE(review): clears UHS_REG unconditionally, dropping the 1.8 V /
 * DDR bits probe and set_ios may have set — verify. */
1478 mmc_writel(host, UHS_REG, 0);
1480 /* disable clock to CIU */
1481 mmc_writel(host, CLKENA, 0);
1482 mmc_writel(host, CLKSRC, 0);
1484 mmc_writel(host, RINTSTS, 0xFFFFFFFF);
1485 mmc_writel(host, INTMASK, MMC_INT_CMD_DONE | MMC_INT_DATA_OVER |
1486 MMC_INT_TXDR | MMC_INT_RXDR | MMC_ERROR_FLAGS);
1487 mmc_writel(host, CTRL, MMC_CTRL_INT_ENABLE);
1489 res = mmc_resume_host(host->mmc);
/* PM disabled: stub out the hooks. */
1494 #define rk_mmc_suspend NULL
1495 #define rk_mmc_resume NULL
1496 #endif /* CONFIG_PM */
1498 static struct platform_driver rk_mmc_driver = {
1499 .remove = __exit_p(rk_mmc_remove),
1500 .shutdown = rk_mmc_shutdown,
1501 .suspend = rk_mmc_suspend,
1502 .resume = rk_mmc_resume,
/* probe is passed to platform_driver_probe() below rather than set here. */
1508 static int __init rk_mmc_init(void)
1510 return platform_driver_probe(&rk_mmc_driver, rk_mmc_probe);
1513 static void __exit rk_mmc_exit(void)
1515 platform_driver_unregister(&rk_mmc_driver);
/* fs_initcall: register earlier than module_init so rootfs on eMMC works. */
1518 fs_initcall(rk_mmc_init);
1519 module_exit(rk_mmc_exit);