/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16
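/*
 * Note: requests shorter than DW_MCI_DMA_THRESHOLD bytes are not worth the
 * DMA setup overhead and are transferred in PIO mode instead; see
 * dw_mci_pre_dma_transfer().
 */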

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */

#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
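	/*
	 * In the descriptor size word, bits 12:0 hold the buffer 1 size and
	 * bits 25:13 the buffer 2 size; the 0x03ffe000 mask above preserves
	 * the (unused) buffer 2 field while updating buffer 1.
	 */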

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | \
	 (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */

static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static int dw_mci_card_busy(struct mmc_host *mmc);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}
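
/*
 * Note on the CMD52 abort built below: in the SD_IO_RW_DIRECT argument,
 * bit 31 selects a write, bits 30:28 address function 0 (the CCCR),
 * bits 25:9 carry the register address (SDIO_CCCR_ABORT) and bits 7:0 the
 * data byte, here the number of the function being aborted, copied from
 * the function field of the original CMD53 argument.
 */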
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy. Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
			if (time_after(jiffies, timeout)) {
				/* Command will fail; we'll pass error then */
				dev_err(host->dev, "Busy; trying anyway\n");
				break;
			}
			udelay(10);
		}
	}
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++, desc++) {
			unsigned int length = sg_dma_len(&data->sg[i]);
			u64 mem_addr = sg_dma_address(&data->sg[i]);

			/*
			 * Set the OWN bit and disable interrupts for this
			 * descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;
			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;
		}

		/* Set first descriptor */
		desc = host->sg_cpu;
		desc->des0 |= IDMAC_DES0_FD;

		/* Set last descriptor */
		desc = host->sg_cpu + (i - 1) *
				sizeof(struct idmac_desc_64addr);
		desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
		desc->des0 |= IDMAC_DES0_LD;

	} else {
		struct idmac_desc *desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++, desc++) {
			unsigned int length = sg_dma_len(&data->sg[i]);
			u32 mem_addr = sg_dma_address(&data->sg[i]);

			/*
			 * Set the OWN bit and disable interrupts for this
			 * descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
					IDMAC_DES0_DIC | IDMAC_DES0_CH);
			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, length);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);
		}

		/* Set first descriptor */
		desc = host->sg_cpu;
		desc->des0 |= cpu_to_le32(IDMAC_DES0_FD);

		/* Set last descriptor */
		desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
		desc->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC));
		desc->des0 |= cpu_to_le32(IDMAC_DES0_LD);
	}

	wmb();
}
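
/*
 * The descriptors above run in chained mode (IDMAC_DES0_CH): des3 (or
 * des6/des7 in the 64-bit layout) points at the next descriptor instead of
 * a second data buffer. The first descriptor is tagged FD, the last one LD,
 * and DIC is cleared only on the last descriptor, so a transfer raises a
 * single completion interrupt.
 */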
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
								i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
								i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}
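
/*
 * dw_mci_pre_req()/dw_mci_post_req() below let the mmc core map and unmap
 * the scatterlist asynchronously, overlapping the dma_map_sg() cost with a
 * transfer already in flight; a non-zero host_cookie marks data that has
 * been pre-mapped this way.
 */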
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried.
	 * Thus, the initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
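
/*
 * FIFOTH packs the three values computed above: per the SDMMC_SET_FIFOTH()
 * macro in dw_mmc.h, the DMA multiple-transaction size (MSIZE) sits in
 * bits 30:28, the RX watermark in bits 27:16 and the TX watermark in
 * bits 11:0.
 */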
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A)
		return;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_MMC_HS400 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	host->dma_ops->start(host, sg_len);

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If the next data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
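		/*
		 * Per the databook, CLKDIV holds half the divider: the card
		 * clock is bus_hz / (2 * CLKDIV), and CLKDIV == 0 bypasses
		 * the divider entirely; hence the DIV_ROUND_UP by 2 above.
		 */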
		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* remember the requested clock and divider to detect changes */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz * data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				  jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return if we failed to turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;
			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int min_uv, max_uv;
	int ret;

	/*
	 * Program the voltage. Note that some instances of dw_mmc may use
	 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly. Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		min_uv = 2700000;
		max_uv = 3600000;
		uhs &= ~v18;
	} else {
		min_uv = 1700000;
		max_uv = 1950000;
		uhs |= v18;
	}
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);

		if (ret) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d: %d - %d\n",
				ret, min_uv, max_uv);
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
	    (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
		read_only = 0;
	else if (!IS_ERR_VALUE(gpio_ro))
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if ((brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (mmc->caps & MMC_CAP_NONREMOVABLE))
		present = 1;
	else if (!IS_ERR_VALUE(gpio_cd))
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}

static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle. According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -ENOSYS;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed. This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err is non-zero, a stop/abort command has
			 * already been issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}

/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}
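
/*
 * The FIFO can only be accessed in full host data words (2, 4 or 8 bytes,
 * depending on data_shift). part_buf stages the odd leading/trailing bytes
 * of a buffer so that the push/pull helpers below always perform whole-word
 * FIFO accesses.
 */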
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_fifo_writew(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_fifo_readw(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_fifo_writel(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
	}
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_fifo_readl(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 8) {
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_fifo_writeq(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
	}
}

static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_fifo_readq(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_fifo_readq(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

static void dw_mci_handle_cd(struct dw_mci *host)
{
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;

		if (slot->mmc->ops->card_event)
			slot->mmc->ops->card_event(slot->mmc);
		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}

static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}
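	/*
	 * (STATUS >> 17) & 0x1fff is the FIFO count field: data sitting in
	 * the FIFO with no interrupt pending is the signature of the missed
	 * data-over interrupt that this quirk works around.
	 */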

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			unsigned long irqflags;

			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock_irqsave(&host->irq_lock, irqflags);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock_irqrestore(&host->irq_lock, irqflags);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];

			if (!slot)
				continue;

			if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
				mci_writel(host, RINTSTS,
					   SDMMC_INT_SDIO(slot->sdio_id));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}
	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			host->dma_ops->complete(host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			host->dma_ops->complete(host);
		}
	}
#endif

	return IRQ_HANDLED;
}

#ifdef CONFIG_OF
/* given a slot id, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	struct device_node *np;
	const __be32 *addr;
	int len;

	if (!dev || !dev->of_node)
		return NULL;

	for_each_child_of_node(dev->of_node, np) {
		addr = of_get_property(np, "reg", &len);
		if (!addr || (len < sizeof(int)))
			continue;
		if (be32_to_cpup(addr) == slot)
			return np;
	}
	return NULL;
}

static struct dw_mci_of_slot_quirks {
	char *quirk;
	int id;
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};

static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int quirks = 0;
	int idx;

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
			dev_warn(dev, "Slot quirk %s is deprecated\n",
					of_slot_quirks[idx].quirk);
			quirks |= of_slot_quirks[idx].id;
		}

	return quirks;
}
#else /* CONFIG_OF */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
#endif /* CONFIG_OF */
2336 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2338 struct mmc_host *mmc;
2339 struct dw_mci_slot *slot;
2340 const struct dw_mci_drv_data *drv_data = host->drv_data;
2344 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2348 slot = mmc_priv(mmc);
2350 slot->sdio_id = host->sdio_id0 + id;
2353 host->slot[id] = slot;
2355 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2357 mmc->ops = &dw_mci_ops;
2358 if (of_property_read_u32_array(host->dev->of_node,
2359 "clock-freq-min-max", freq, 2)) {
2360 mmc->f_min = DW_MCI_FREQ_MIN;
2361 mmc->f_max = DW_MCI_FREQ_MAX;
2363 mmc->f_min = freq[0];
2364 mmc->f_max = freq[1];
2367 /*if there are external regulators, get them*/
2368 ret = mmc_regulator_get_supply(mmc);
2369 if (ret == -EPROBE_DEFER)
2370 goto err_host_allocated;
2372 if (!mmc->ocr_avail)
2373 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2375 if (host->pdata->caps)
2376 mmc->caps = host->pdata->caps;
2378 if (host->pdata->pm_caps)
2379 mmc->pm_caps = host->pdata->pm_caps;
2381 if (host->dev->of_node) {
2382 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2386 ctrl_id = to_platform_device(host->dev)->id;
2388 if (drv_data && drv_data->caps)
2389 mmc->caps |= drv_data->caps[ctrl_id];
2391 if (host->pdata->caps2)
2392 mmc->caps2 = host->pdata->caps2;
2394 ret = mmc_of_parse(mmc);
2396 goto err_host_allocated;
	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
#else
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
	}
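	/*
	 * Worked example (illustrative, not a real configuration): with
	 * IDMAC and a ring_size of 128 descriptors, the defaults above
	 * give max_req_size = 0x1000 * 128 = 512 KiB and
	 * max_blk_count = 524288 / 512 = 1024.
	 */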
	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;

	/* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
	addr_config = (mci_readl(host, HCON) >> 27) & 0x01;

	if (addr_config == 1) {
		/* host supports IDMAC in 64-bit address mode */
		host->dma_64bit_address = 1;
		dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
		if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
			dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
	} else {
		/* host supports IDMAC in 32-bit address mode */
		host->dma_64bit_address = 0;
		dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
	}
	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					   &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
}
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & reset))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(host->dev,
		"Timeout resetting block (ctrl reset %#x)\n",
		ctrl & reset);

	return false;
}
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);
			u32 status;

			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to clear during reset\n",
					__func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
	/* It is also recommended that we reset and reprogram idmac */
	dw_mci_idmac_reset(host);
#endif

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}
static void dw_mci_cmd11_timer(unsigned long arg)
{
	struct dw_mci *host = (struct dw_mci *)arg;

	if (host->state != STATE_SENDING_CMD11) {
		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
		return;
	}

	host->cmd_status = SDMMC_INT_RTO;
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
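/*
 * CMD11 starts the SD 1.8V signal-voltage switch.  If the card never
 * completes the switch, the timer above injects a fake response
 * timeout (SDMMC_INT_RTO) so the request state machine can unwind
 * instead of hanging in STATE_SENDING_CMD11.
 */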
#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	}, {
		.quirk	= "disable-wp",
		.id	= DW_MCI_QUIRK_NO_WRITE_PROTECT,
	},
};
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				 &pdata->num_slots)) {
		dev_info(dev,
			 "num-slots property not found, assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	return pdata;
}
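/*
 * Illustrative device-tree fragment (values are examples only)
 * exercising the properties parsed above:
 *
 *	num-slots = <1>;
 *	fifo-depth = <0x80>;
 *	card-detect-delay = <200>;
 *	clock-frequency = <100000000>;
 *	broken-cd;
 */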
#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
static void dw_mci_enable_cd(struct dw_mci *host)
{
	struct dw_mci_board *brd = host->pdata;
	unsigned long irqflags;
	u32 temp;
	int i;

	/* No need for CD if broken card detection */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		return;

	/* No need for CD if all slots have a non-error GPIO */
	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
			break;
	}
	if (i == host->num_slots)
		return;

	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp |= SDMMC_INT_CD;
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	if (host->pdata->num_slots > 1) {
		dev_err(host->dev,
			"Platform data must supply num_slots.\n");
		return -ENODEV;
	}
	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}
		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}
	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	if (drv_data && drv_data->setup_clock) {
		ret = drv_data->setup_clock(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific clock setup failed\n");
			goto err_clk_ciu;
		}
	}

	setup_timer(&host->cmd11_timer,
		    dw_mci_cmd11_timer, (unsigned long)host);

	host->quirks = host->pdata->quirks;

	spin_lock_init(&host->lock);
	spin_lock_init(&host->irq_lock);
	INIT_LIST_HEAD(&host->queue);
	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}
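	/*
	 * HCON[9:7] encodes the host data width: 0 selects 16-bit,
	 * 1 selects 32-bit and 2 selects 64-bit FIFO accesses; any
	 * other value is reserved, which the WARN above guards against.
	 */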
	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
		return -ENODEV;

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);
	/*
	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
	 *                          Tx Mark = fifo_size / 2 DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware,
		 * you should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);
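	/*
	 * Worked example (illustrative): a 128-entry FIFO yields
	 * RX_WMark = 63 and TX_WMark = 64, with the 0x2 MSize field
	 * requesting 8-transfer DMA bursts, matching the comment above.
	 */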
	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * The data (FIFO) register offset moved in the 2.40a spec, so
	 * check the hardware version ID and pick the matching offset.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->fifo_reg = host->regs + DATA_OFFSET;
	else
		host->fifo_reg = host->regs + DATA_240A_OFFSET;
	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_dmaunmap;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
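	/*
	 * HCON[5:1] holds the number of card slots minus one, hence the
	 * +1 when falling back to the hardware configuration register.
	 */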
	/*
	 * Enable interrupts for command done, data over, data empty,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	/* Enable mci interrupt */
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	dev_info(host->dev,
		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
		 host->irq, width, fifo_size);
	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev,
			"attempted to initialize %d slots, but failed on all\n",
			host->num_slots);
		goto err_dmaunmap;
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");

	return 0;
err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

err_clk_ciu:
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);
int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		return ret;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value of the FIFOTH register,
	 * and invalidate prev_blksz by zeroing it.
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);
MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");