/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/sdio.h>
33 #include <linux/mmc/dw_mmc.h>
34 #include <linux/bitops.h>
35 #include <linux/regulator/consumer.h>
36 #include <linux/workqueue.h>
38 #include <linux/of_gpio.h>
42 /* Common flag combinations */
43 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
44 SDMMC_INT_HTO | SDMMC_INT_SBE | \
46 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
48 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
49 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
50 #define DW_MCI_SEND_STATUS 1
51 #define DW_MCI_RECV_STATUS 2
52 #define DW_MCI_DMA_THRESHOLD 16
54 #define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
55 #define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
57 #ifdef CONFIG_MMC_DW_IDMAC
58 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
59 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
60 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
64 u32 des0; /* Control Descriptor */
65 #define IDMAC_DES0_DIC BIT(1)
66 #define IDMAC_DES0_LD BIT(2)
67 #define IDMAC_DES0_FD BIT(3)
68 #define IDMAC_DES0_CH BIT(4)
69 #define IDMAC_DES0_ER BIT(5)
70 #define IDMAC_DES0_CES BIT(30)
71 #define IDMAC_DES0_OWN BIT(31)
73 u32 des1; /* Buffer sizes */
74 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
75 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
77 u32 des2; /* buffer 1 physical address */
79 u32 des3; /* buffer 2 physical address */
81 #endif /* CONFIG_MMC_DW_IDMAC */
83 static const u8 tuning_blk_pattern_4bit[] = {
84 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
85 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
86 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
87 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
88 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
89 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
90 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
91 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
94 static const u8 tuning_blk_pattern_8bit[] = {
95 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
96 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
97 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
98 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
99 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
100 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
101 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
102 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
103 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
104 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
105 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
106 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
107 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
108 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
109 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
110 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
113 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
114 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
116 #if defined(CONFIG_DEBUG_FS)
117 static int dw_mci_req_show(struct seq_file *s, void *v)
119 struct dw_mci_slot *slot = s->private;
120 struct mmc_request *mrq;
121 struct mmc_command *cmd;
122 struct mmc_command *stop;
123 struct mmc_data *data;
125 /* Make sure we get a consistent snapshot */
126 spin_lock_bh(&slot->host->lock);
136 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
137 cmd->opcode, cmd->arg, cmd->flags,
138 cmd->resp[0], cmd->resp[1], cmd->resp[2],
139 cmd->resp[2], cmd->error);
141 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
142 data->bytes_xfered, data->blocks,
143 data->blksz, data->flags, data->error);
146 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
147 stop->opcode, stop->arg, stop->flags,
148 stop->resp[0], stop->resp[1], stop->resp[2],
149 stop->resp[2], stop->error);
152 spin_unlock_bh(&slot->host->lock);
157 static int dw_mci_req_open(struct inode *inode, struct file *file)
159 return single_open(file, dw_mci_req_show, inode->i_private);
162 static const struct file_operations dw_mci_req_fops = {
163 .owner = THIS_MODULE,
164 .open = dw_mci_req_open,
167 .release = single_release,
170 static int dw_mci_regs_show(struct seq_file *s, void *v)
172 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
173 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
174 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
175 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
176 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
177 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
182 static int dw_mci_regs_open(struct inode *inode, struct file *file)
184 return single_open(file, dw_mci_regs_show, inode->i_private);
187 static const struct file_operations dw_mci_regs_fops = {
188 .owner = THIS_MODULE,
189 .open = dw_mci_regs_open,
192 .release = single_release,
/*
 * Create per-slot debugfs nodes under the mmc host's debugfs root:
 * a register dump ("regs"), the in-flight request ("req"), the state
 * machine state, and the pending/completed event bitmasks.
 */
195 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
197 struct mmc_host *mmc = slot->mmc;
198 struct dw_mci *host = slot->host;
202 root = mmc->debugfs_root;
/* "regs" gets the host as private data, "req" gets the slot */
206 node = debugfs_create_file("regs", S_IRUSR, root, host,
211 node = debugfs_create_file("req", S_IRUSR, root, slot,
216 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
220 node = debugfs_create_x32("pending_events", S_IRUSR, root,
221 (u32 *)&host->pending_events);
225 node = debugfs_create_x32("completed_events", S_IRUSR, root,
226 (u32 *)&host->completed_events);
/* reached when creating one of the debugfs nodes failed */
233 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
235 #endif /* defined(CONFIG_DEBUG_FS) */
237 static void dw_mci_set_timeout(struct dw_mci *host)
239 /* timeout (maximum) */
240 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMD register flag
 * bits (SDMMC_CMD_*); the platform drv_data hook may adjust the result.
 */
243 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
245 struct mmc_data *data;
246 struct dw_mci_slot *slot = mmc_priv(mmc);
247 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
249 cmd->error = -EINPROGRESS;
/*
 * Stop/abort-class commands (stop transmission, go idle/inactive, or
 * an SDIO write to the CCCR abort register) must carry the STOP bit.
 */
253 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
254 cmd->opcode == MMC_GO_IDLE_STATE ||
255 cmd->opcode == MMC_GO_INACTIVE_STATE ||
256 (cmd->opcode == SD_IO_RW_DIRECT &&
257 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
258 cmdr |= SDMMC_CMD_STOP;
/* Other data commands wait for any previous data phase to finish */
260 if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
261 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
263 if (cmd->flags & MMC_RSP_PRESENT) {
264 /* We expect a response, so set this bit */
265 cmdr |= SDMMC_CMD_RESP_EXP;
266 if (cmd->flags & MMC_RSP_136)
267 cmdr |= SDMMC_CMD_RESP_LONG;
/* Response CRC checking as requested by the mmc core */
270 if (cmd->flags & MMC_RSP_CRC)
271 cmdr |= SDMMC_CMD_RESP_CRC;
/* Data phase: expected-data bit plus stream/write mode flags */
275 cmdr |= SDMMC_CMD_DAT_EXP;
276 if (data->flags & MMC_DATA_STREAM)
277 cmdr |= SDMMC_CMD_STRM_MODE;
278 if (data->flags & MMC_DATA_WRITE)
279 cmdr |= SDMMC_CMD_DAT_WR;
/* Give the platform glue a chance to tweak the command word */
282 if (drv_data && drv_data->prepare_command)
283 drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build (in host->stop_abort) the stop/abort command matching the given
 * data command, and return the raw CMD register word used to issue it.
 */
288 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
290 struct mmc_command *stop;
296 stop = &host->stop_abort;
298 memset(stop, 0, sizeof(struct mmc_command));
/* Block read/write transfers are aborted with STOP_TRANSMISSION */
300 if (cmdr == MMC_READ_SINGLE_BLOCK ||
301 cmdr == MMC_READ_MULTIPLE_BLOCK ||
302 cmdr == MMC_WRITE_BLOCK ||
303 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
304 stop->opcode = MMC_STOP_TRANSMISSION;
306 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
/* SDIO extended I/O: abort by writing the CCCR abort register */
307 } else if (cmdr == SD_IO_RW_EXTENDED) {
308 stop->opcode = SD_IO_RW_DIRECT;
309 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
310 ((cmd->arg >> 28) & 0x7);
311 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
/* Issue as a STOP command that expects a CRC-checked response */
316 cmdr = stop->opcode | SDMMC_CMD_STOP |
317 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Fire a command at the controller: write the argument register, then
 * the command register with the start bit set.
 */
322 static void dw_mci_start_command(struct dw_mci *host,
323 struct mmc_command *cmd, u32 cmd_flags)
327 "start command: ARGR=0x%08x CMDR=0x%08x\n",
328 cmd->arg, cmd_flags);
330 mci_writel(host, CMDARG, cmd->arg);
333 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
336 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
338 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
339 dw_mci_start_command(host, stop, host->stop_cmdr);
342 /* DMA interface functions */
343 static void dw_mci_stop_dma(struct dw_mci *host)
345 if (host->using_dma) {
346 host->dma_ops->stop(host);
347 host->dma_ops->cleanup(host);
350 /* Data transfer was stopped by the interrupt handler */
351 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
354 static int dw_mci_get_dma_dir(struct mmc_data *data)
356 if (data->flags & MMC_DATA_WRITE)
357 return DMA_TO_DEVICE;
359 return DMA_FROM_DEVICE;
362 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the scatterlist after a DMA transfer — unless it was pre-mapped
 * by pre_req (host_cookie set), in which case post_req will unmap it.
 */
363 static void dw_mci_dma_cleanup(struct dw_mci *host)
365 struct mmc_data *data = host->data;
368 if (!data->host_cookie)
369 dma_unmap_sg(host->dev,
372 dw_mci_get_dma_dir(data));
375 static void dw_mci_idmac_reset(struct dw_mci *host)
377 u32 bmod = mci_readl(host, BMOD);
378 /* Software reset of DMA */
379 bmod |= SDMMC_IDMAC_SWRESET;
380 mci_writel(host, BMOD, bmod);
383 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
387 /* Disable and reset the IDMAC interface */
388 temp = mci_readl(host, CTRL);
389 temp &= ~SDMMC_CTRL_USE_IDMAC;
390 temp |= SDMMC_CTRL_DMA_RESET;
391 mci_writel(host, CTRL, temp);
393 /* Stop the IDMAC running */
394 temp = mci_readl(host, BMOD);
395 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
396 temp |= SDMMC_IDMAC_SWRESET;
397 mci_writel(host, BMOD, temp);
/*
 * IDMAC transfer-done handler: clean up the DMA mapping and signal
 * transfer completion to the state-machine tasklet.
 */
400 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
402 struct mmc_data *data = host->data;
404 dev_vdbg(host->dev, "DMA complete\n");
406 host->dma_ops->cleanup(host);
409 * If the card was removed, data will be NULL. No point in trying to
410 * send the stop command or waiting for NBUSY in this case.
413 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
414 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the DMA-mapped scatterlist: one
 * chained descriptor per sg entry, then flag the first and last ones.
 */
418 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
422 struct idmac_desc *desc = host->sg_cpu;
424 for (i = 0; i < sg_len; i++, desc++) {
425 unsigned int length = sg_dma_len(&data->sg[i]);
426 u32 mem_addr = sg_dma_address(&data->sg[i]);
428 /* Set the OWN bit and disable interrupts for this descriptor */
429 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
432 IDMAC_SET_BUFFER1_SIZE(desc, length);
434 /* Physical address to DMA to/from */
435 desc->des2 = mem_addr;
438 /* Set first descriptor */
440 desc->des0 |= IDMAC_DES0_FD;
442 /* Set last descriptor */
/* re-point at the final ring entry: end-chain, interrupt enabled */
443 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
444 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
445 desc->des0 |= IDMAC_DES0_LD;
/*
 * Arm the IDMAC for a transfer: build descriptors, route the controller
 * to the internal DMAC, enable it, and write the poll-demand register
 * so it starts fetching descriptors.
 */
450 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
454 dw_mci_translate_sglist(host, host->data, sg_len);
456 /* Select IDMAC interface */
457 temp = mci_readl(host, CTRL);
458 temp |= SDMMC_CTRL_USE_IDMAC;
459 mci_writel(host, CTRL, temp);
463 /* Enable the IDMAC */
464 temp = mci_readl(host, BMOD);
465 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
466 mci_writel(host, BMOD, temp);
468 /* Start it running */
469 mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: forward-link the descriptor ring held in the
 * coherent sg buffer, reset the DMAC, unmask only the TX/RX-complete
 * interrupts, and program the descriptor base address.
 */
472 static int dw_mci_idmac_init(struct dw_mci *host)
474 struct idmac_desc *p;
477 /* Number of descriptors in the ring buffer */
478 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
480 /* Forward link the descriptor list */
481 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
482 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
484 /* Set the last descriptor as the end-of-ring descriptor */
485 p->des3 = host->sg_dma;
486 p->des0 = IDMAC_DES0_ER;
488 dw_mci_idmac_reset(host);
490 /* Mask out interrupts - get Tx & Rx complete only */
491 mci_writel(host, IDSTS, IDMAC_INT_CLR);
492 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
495 /* Set the descriptor base address */
496 mci_writel(host, DBADDR, host->sg_dma);
/* DMA operations vtable for the internal DMA controller (IDMAC). */
500 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
501 .init = dw_mci_idmac_init,
502 .start = dw_mci_idmac_start_dma,
503 .stop = dw_mci_idmac_stop_dma,
504 .complete = dw_mci_idmac_complete_dma,
505 .cleanup = dw_mci_dma_cleanup,
507 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * DMA-map the request's scatterlist, or reuse the mapping cached in
 * data->host_cookie.  Short or non-word-aligned transfers are rejected
 * so the caller falls back to PIO.  Returns the mapped sg count on
 * success.
 */
509 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
510 struct mmc_data *data,
513 struct scatterlist *sg;
514 unsigned int i, sg_len;
/* Reuse a mapping created earlier by pre_req */
516 if (!next && data->host_cookie)
517 return data->host_cookie;
520 * We don't do DMA on "complex" transfers, i.e. with
521 * non-word-aligned buffers or lengths. Also, we don't bother
522 * with all the DMA setup overhead for short transfers.
524 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
530 for_each_sg(data->sg, sg, data->sg_len, i) {
531 if (sg->offset & 3 || sg->length & 3)
535 sg_len = dma_map_sg(host->dev,
538 dw_mci_get_dma_dir(data));
/* Cache the mapping for the pre_req/post_req fast path */
543 data->host_cookie = sg_len;
/*
 * mmc pre_req hook: DMA-map the data buffers ahead of time so the
 * mapping cost overlaps with the previous transfer.
 */
548 static void dw_mci_pre_req(struct mmc_host *mmc,
549 struct mmc_request *mrq,
552 struct dw_mci_slot *slot = mmc_priv(mmc);
553 struct mmc_data *data = mrq->data;
555 if (!slot->host->use_dma || !data)
/* a stale cookie would alias a previous request's mapping */
558 if (data->host_cookie) {
559 data->host_cookie = 0;
/* mapping failed: clear the cookie so DMA is re-attempted later */
563 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
564 data->host_cookie = 0;
/*
 * mmc post_req hook: undo the DMA mapping created by pre_req (cookie
 * non-zero) once the request has completed.
 */
567 static void dw_mci_post_req(struct mmc_host *mmc,
568 struct mmc_request *mrq,
571 struct dw_mci_slot *slot = mmc_priv(mmc);
572 struct mmc_data *data = mrq->data;
574 if (!slot->host->use_dma || !data)
577 if (data->host_cookie)
578 dma_unmap_sg(slot->host->dev,
581 dw_mci_get_dma_dir(data));
582 data->host_cookie = 0;
/*
 * Recompute the FIFO watermarks and DMA burst size (FIFOTH) for the
 * given block size: pick the largest burst (msize) that evenly divides
 * both the block depth and the TX watermark complement.
 */
585 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
587 #ifdef CONFIG_MMC_DW_IDMAC
588 unsigned int blksz = data->blksz;
589 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
590 u32 fifo_width = 1 << host->data_shift;
591 u32 blksz_depth = blksz / fifo_width, fifoth_val;
592 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
593 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
595 tx_wmark = (host->fifo_depth) / 2;
596 tx_wmark_invers = host->fifo_depth - tx_wmark;
600 * if blksz is not a multiple of the FIFO width
602 if (blksz % fifo_width) {
/* walk candidate burst sizes from largest to smallest */
609 if (!((blksz_depth % mszs[idx]) ||
610 (tx_wmark_invers % mszs[idx]))) {
612 rx_wmark = mszs[idx] - 1;
617 * If idx is '0', it won't be tried
618 * Thus, initial values are used
621 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
622 mci_writel(host, FIFOTH, fifoth_val);
626 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
628 unsigned int blksz = data->blksz;
629 u32 blksz_depth, fifo_depth;
632 WARN_ON(!(data->flags & MMC_DATA_READ));
634 if (host->timing != MMC_TIMING_MMC_HS200 &&
635 host->timing != MMC_TIMING_UHS_SDR104)
638 blksz_depth = blksz / (1 << host->data_shift);
639 fifo_depth = host->fifo_depth;
641 if (blksz_depth > fifo_depth)
645 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
646 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
647 * Currently just choose blksz.
650 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
654 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to hand the data transfer to the DMA engine.  Returns non-zero
 * when DMA is unavailable or unsuitable and the caller must fall back
 * to PIO.
 */
657 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
664 /* If we don't have a channel, we can't do DMA */
668 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
/* mapping failed: make sure the engine is stopped before PIO */
670 host->dma_ops->stop(host);
677 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
678 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
682 * Decide the MSIZE and RX/TX Watermark.
683 * If current block size is same with previous size,
684 * no need to update fifoth.
686 if (host->prev_blksz != data->blksz)
687 dw_mci_adjust_fifoth(host, data);
689 /* Enable the DMA interface */
690 temp = mci_readl(host, CTRL);
691 temp |= SDMMC_CTRL_DMA_ENABLE;
692 mci_writel(host, CTRL, temp);
694 /* Disable RX/TX IRQs, let DMA handle it */
695 temp = mci_readl(host, INTMASK);
696 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
697 mci_writel(host, INTMASK, temp);
699 host->dma_ops->start(host, sg_len);
/*
 * Set up the data phase of a request: prefer DMA, fall back to PIO
 * (sg_miter plus FIFO TX/RX-ready interrupts) when DMA is not possible.
 */
704 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
708 data->error = -EINPROGRESS;
714 if (data->flags & MMC_DATA_READ) {
715 host->dir_status = DW_MCI_RECV_STATUS;
716 dw_mci_ctrl_rd_thld(host, data);
718 host->dir_status = DW_MCI_SEND_STATUS;
/* non-zero return means no DMA: drive the FIFO by hand (PIO) */
721 if (dw_mci_submit_data_dma(host, data)) {
722 int flags = SG_MITER_ATOMIC;
723 if (host->data->flags & MMC_DATA_READ)
724 flags |= SG_MITER_TO_SG;
726 flags |= SG_MITER_FROM_SG;
728 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
730 host->part_buf_start = 0;
731 host->part_buf_count = 0;
/* clear stale FIFO events, then enable TX/RX-ready interrupts */
733 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
734 temp = mci_readl(host, INTMASK);
735 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
736 mci_writel(host, INTMASK, temp);
/* take the controller out of DMA mode for PIO */
738 temp = mci_readl(host, CTRL);
739 temp &= ~SDMMC_CTRL_DMA_ENABLE;
740 mci_writel(host, CTRL, temp);
743 * Use the initial fifoth_val for PIO mode.
744 * If next issued data may be transfered by DMA mode,
745 * prev_blksz should be invalidated.
747 mci_writel(host, FIFOTH, host->fifoth_val);
748 host->prev_blksz = 0;
751 * Keep the current block size.
752 * It will be used to decide whether to update
753 * fifoth register next time.
755 host->prev_blksz = data->blksz;
759 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
761 struct dw_mci *host = slot->host;
762 unsigned long timeout = jiffies + msecs_to_jiffies(500);
763 unsigned int cmd_status = 0;
765 mci_writel(host, CMDARG, arg);
767 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
769 while (time_before(jiffies, timeout)) {
770 cmd_status = mci_readl(host, CMD);
771 if (!(cmd_status & SDMMC_CMD_START))
774 dev_err(&slot->mmc->class_dev,
775 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
776 cmd, arg, cmd_status);
/*
 * Program the card clock (divider, enable, low-power gating) and the
 * slot bus width.  Clock changes are pushed to the card-clock domain
 * with CMD_UPD_CLK register-update commands.
 */
779 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
781 struct dw_mci *host = slot->host;
782 unsigned int clock = slot->clock;
/* clock == 0: just gate the card clock off */
787 mci_writel(host, CLKENA, 0);
789 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
790 } else if (clock != host->current_speed || force_clkinit) {
791 div = host->bus_hz / clock;
792 if (host->bus_hz % clock && host->bus_hz > clock)
794 * move the + 1 after the divide to prevent
795 * over-clocking the card.
799 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
/* only log when the effective divider actually changes */
801 if ((clock << div) != slot->__clk_old || force_clkinit)
802 dev_info(&slot->mmc->class_dev,
803 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
804 slot->id, host->bus_hz, clock,
805 div ? ((host->bus_hz / div) >> 1) :
/* disable the clock before touching the divider */
809 mci_writel(host, CLKENA, 0);
810 mci_writel(host, CLKSRC, 0);
814 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
816 /* set clock to desired speed */
817 mci_writel(host, CLKDIV, div);
821 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
823 /* enable clock; only low power if no SDIO */
824 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
825 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
826 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
827 mci_writel(host, CLKENA, clk_en_a);
831 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
833 /* keep the clock with reflecting clock dividor */
834 slot->__clk_old = clock << div;
837 host->current_speed = clock;
839 /* Set the current slot bus width */
840 mci_writel(host, CTYPE, (slot->ctype << slot->id));
/*
 * Begin executing one command of a request on the given slot: reset
 * per-request state, program the data phase (if any), issue the
 * command, and precompute the stop/abort command word for later use.
 */
843 static void __dw_mci_start_request(struct dw_mci *host,
844 struct dw_mci_slot *slot,
845 struct mmc_command *cmd)
847 struct mmc_request *mrq;
848 struct mmc_data *data;
852 if (host->pdata->select_slot)
853 host->pdata->select_slot(slot->id);
855 host->cur_slot = slot;
/* fresh event and status tracking for this request */
858 host->pending_events = 0;
859 host->completed_events = 0;
860 host->cmd_status = 0;
861 host->data_status = 0;
862 host->dir_status = 0;
/* program timeout and transfer size for the data phase */
866 dw_mci_set_timeout(host);
867 mci_writel(host, BYTCNT, data->blksz*data->blocks);
868 mci_writel(host, BLKSIZ, data->blksz);
871 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
873 /* this is the first command, send the initialization clock */
874 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
875 cmdflags |= SDMMC_CMD_INIT;
878 dw_mci_submit_data(host, data);
882 dw_mci_start_command(host, cmd, cmdflags);
/* prepare the stop command up-front; used on completion or error */
885 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
887 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
890 static void dw_mci_start_request(struct dw_mci *host,
891 struct dw_mci_slot *slot)
893 struct mmc_request *mrq = slot->mrq;
894 struct mmc_command *cmd;
896 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
897 __dw_mci_start_request(host, slot, cmd);
/*
 * Attach a request to the slot and start it immediately when the host
 * state machine is idle; otherwise park the slot on the host queue.
 */
900 /* must be called with host->lock held */
901 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
902 struct mmc_request *mrq)
904 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
909 if (host->state == STATE_IDLE) {
910 host->state = STATE_SENDING_CMD;
911 dw_mci_start_request(host, slot);
913 list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops.request: fail fast with -ENOMEDIUM when no card is
 * present, otherwise queue the request — all under host->lock.
 */
917 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
919 struct dw_mci_slot *slot = mmc_priv(mmc);
920 struct dw_mci *host = slot->host;
925 * The check for card presence and queueing of the request must be
926 * atomic, otherwise the card could be removed in between and the
927 * request wouldn't fail until another card was inserted.
929 spin_lock_bh(&host->lock);
931 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
932 spin_unlock_bh(&host->lock);
933 mrq->cmd->error = -ENOMEDIUM;
934 mmc_request_done(mmc, mrq);
938 dw_mci_queue_request(host, slot, mrq);
940 spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.set_ios: apply bus width, DDR timing, clock and slot
 * power changes requested by the mmc core.
 */
943 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
945 struct dw_mci_slot *slot = mmc_priv(mmc);
946 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
949 switch (ios->bus_width) {
950 case MMC_BUS_WIDTH_4:
951 slot->ctype = SDMMC_CTYPE_4BIT;
953 case MMC_BUS_WIDTH_8:
954 slot->ctype = SDMMC_CTYPE_8BIT;
957 /* set default 1 bit mode */
958 slot->ctype = SDMMC_CTYPE_1BIT;
/* DDR50 is a per-slot bit in the upper half of UHS_REG */
961 regs = mci_readl(slot->host, UHS_REG);
964 if (ios->timing == MMC_TIMING_UHS_DDR50)
965 regs |= ((0x1 << slot->id) << 16);
967 regs &= ~((0x1 << slot->id) << 16);
969 mci_writel(slot->host, UHS_REG, regs);
970 slot->host->timing = ios->timing;
973 * Use mirror of ios->clock to prevent race with mmc
974 * core ios update when finding the minimum.
976 slot->clock = ios->clock;
978 if (drv_data && drv_data->set_ios)
979 drv_data->set_ios(slot->host, ios);
981 /* Slot specific timing and width adjustment */
982 dw_mci_setup_bus(slot, false);
984 switch (ios->power_mode) {
/* power up: request the init clock stream on the first command */
986 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
988 if (slot->host->pdata->setpower)
989 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
990 regs = mci_readl(slot->host, PWREN);
991 regs |= (1 << slot->id);
992 mci_writel(slot->host, PWREN, regs);
995 /* Power down slot */
996 if (slot->host->pdata->setpower)
997 slot->host->pdata->setpower(slot->id, 0);
998 regs = mci_readl(slot->host, PWREN);
999 regs &= ~(1 << slot->id);
1000 mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops.get_ro: report write-protect state, trying (in order)
 * the slot quirk, a platform callback, a WP GPIO, then the
 * controller's WRTPRT register.
 */
1007 static int dw_mci_get_ro(struct mmc_host *mmc)
1010 struct dw_mci_slot *slot = mmc_priv(mmc);
1011 struct dw_mci_board *brd = slot->host->pdata;
1013 /* Use platform get_ro function, else try on board write protect */
1014 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1016 else if (brd->get_ro)
1017 read_only = brd->get_ro(slot->id);
1018 else if (gpio_is_valid(slot->wp_gpio))
1019 read_only = gpio_get_value(slot->wp_gpio);
1022 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1024 dev_dbg(&mmc->class_dev, "card is %s\n",
1025 read_only ? "read-only" : "read-write");
/*
 * mmc_host_ops.get_cd: report card presence via quirk, platform
 * callback, or the controller's CDETECT register.
 */
1030 static int dw_mci_get_cd(struct mmc_host *mmc)
1033 struct dw_mci_slot *slot = mmc_priv(mmc);
1034 struct dw_mci_board *brd = slot->host->pdata;
1036 /* Use platform get_cd function, else try onboard card detect */
1037 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
/* platform get_cd is inverted: non-zero means absent */
1039 else if (brd->get_cd)
1040 present = !brd->get_cd(slot->id);
/* NOTE(review): CDETECT appears active-low (cf. inversion above) — confirm */
1042 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1046 dev_dbg(&mmc->class_dev, "card is present\n");
1048 dev_dbg(&mmc->class_dev, "card is not present\n");
1054 * Disable lower power mode.
1056 * Low power mode will stop the card clock when idle. According to the
1057 * description of the CLKENA register we should disable low power mode
1058 * for SDIO cards if we need SDIO interrupts to work.
1060 * This function is fast if low power mode is already disabled.
1062 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1064 struct dw_mci *host = slot->host;
1066 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1068 clk_en_a = mci_readl(host, CLKENA);
1070 if (clk_en_a & clken_low_pwr) {
1071 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1072 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1073 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * mmc_host_ops.enable_sdio_irq: (un)mask the slot's SDIO interrupt in
 * INTMASK.  Low-power clock gating must be turned off while SDIO IRQs
 * are in use, since a stopped card clock would block their delivery.
 */
1077 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1079 struct dw_mci_slot *slot = mmc_priv(mmc);
1080 struct dw_mci *host = slot->host;
1083 /* Enable/disable Slot Specific SDIO interrupt */
1084 int_mask = mci_readl(host, INTMASK);
1087 * Turn off low power mode if it was enabled. This is a bit of
1088 * a heavy operation and we disable / enable IRQs a lot, so
1089 * we'll leave low power mode disabled and it will get
1090 * re-enabled again in dw_mci_setup_bus().
1092 dw_mci_disable_low_power(slot);
1094 mci_writel(host, INTMASK,
1095 (int_mask | SDMMC_INT_SDIO(slot->id)));
1097 mci_writel(host, INTMASK,
1098 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
/*
 * mmc_host_ops.execute_tuning: choose the tuning block pattern that
 * matches the opcode and bus width, then delegate the actual tuning to
 * the platform drv_data hook.
 */
1102 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1104 struct dw_mci_slot *slot = mmc_priv(mmc);
1105 struct dw_mci *host = slot->host;
1106 const struct dw_mci_drv_data *drv_data = host->drv_data;
1107 struct dw_mci_tuning_data tuning_data;
1110 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1111 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1112 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1113 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1114 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1115 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1116 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
/* SD tuning always uses the 4-bit pattern */
1120 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1121 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1122 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1125 "Undefined command(%d) for tuning\n", opcode);
1129 if (drv_data && drv_data->execute_tuning)
1130 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/* mmc_host_ops implemented by this driver. */
1134 static const struct mmc_host_ops dw_mci_ops = {
1135 .request = dw_mci_request,
1136 .pre_req = dw_mci_pre_req,
1137 .post_req = dw_mci_post_req,
1138 .set_ios = dw_mci_set_ios,
1139 .get_ro = dw_mci_get_ro,
1140 .get_cd = dw_mci_get_cd,
1141 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1142 .execute_tuning = dw_mci_execute_tuning,
/*
 * Finish the current request: start the next queued slot (or go idle),
 * then complete the finished request to the mmc core.  Drops and
 * re-takes host->lock around mmc_request_done().
 */
1145 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1146 __releases(&host->lock)
1147 __acquires(&host->lock)
1149 struct dw_mci_slot *slot;
1150 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1152 WARN_ON(host->cmd || host->data);
1154 host->cur_slot->mrq = NULL;
1156 if (!list_empty(&host->queue)) {
1157 slot = list_entry(host->queue.next,
1158 struct dw_mci_slot, queue_node);
1159 list_del(&slot->queue_node);
1160 dev_vdbg(host->dev, "list not empty: %s is next\n",
1161 mmc_hostname(slot->mmc));
1162 host->state = STATE_SENDING_CMD;
1163 dw_mci_start_request(host, slot);
1165 dev_vdbg(host->dev, "list empty\n");
1166 host->state = STATE_IDLE;
/* completion callback may re-enter the host; drop the lock */
1169 spin_unlock(&host->lock);
1170 mmc_request_done(prev_mmc, mrq);
1171 spin_lock(&host->lock);
/*
 * Harvest a finished command: read the response registers and convert
 * controller status bits into an errno in cmd->error.  Returns the
 * resulting error code (0 on success).
 */
1174 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1176 u32 status = host->cmd_status;
1178 host->cmd_status = 0;
1180 /* Read the response from the card (up to 16 bytes) */
1181 if (cmd->flags & MMC_RSP_PRESENT) {
/* 136-bit responses: RESP0..RESP3 map to resp[3]..resp[0] */
1182 if (cmd->flags & MMC_RSP_136) {
1183 cmd->resp[3] = mci_readl(host, RESP0);
1184 cmd->resp[2] = mci_readl(host, RESP1);
1185 cmd->resp[1] = mci_readl(host, RESP2);
1186 cmd->resp[0] = mci_readl(host, RESP3);
1188 cmd->resp[0] = mci_readl(host, RESP0);
/* map controller error status bits onto errno values */
1195 if (status & SDMMC_INT_RTO)
1196 cmd->error = -ETIMEDOUT;
1197 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1198 cmd->error = -EILSEQ;
1199 else if (status & SDMMC_INT_RESP_ERR)
1205 /* newer ip versions need a delay between retries */
1206 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Harvest a finished data phase: translate data error status bits into
 * data->error, reset the FIFO after an error, and account the number
 * of bytes transferred.
 */
1213 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1215 u32 status = host->data_status;
1217 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1218 if (status & SDMMC_INT_DRTO) {
1219 data->error = -ETIMEDOUT;
1220 } else if (status & SDMMC_INT_DCRC) {
1221 data->error = -EILSEQ;
/* end-bit error: meaning depends on the transfer direction */
1222 } else if (status & SDMMC_INT_EBE) {
1223 if (host->dir_status ==
1224 DW_MCI_SEND_STATUS) {
1226 * No data CRC status was returned.
1227 * The number of bytes transferred
1228 * will be exaggerated in PIO mode.
1230 data->bytes_xfered = 0;
1231 data->error = -ETIMEDOUT;
1232 } else if (host->dir_status ==
1233 DW_MCI_RECV_STATUS) {
1237 /* SDMMC_INT_SBE is included */
1241 dev_err(host->dev, "data error, status 0x%08x\n", status);
1244 * After an error, there may be data lingering
1247 dw_mci_fifo_reset(host);
/* success: the whole transfer made it through */
1249 data->bytes_xfered = data->blocks * data->blksz;
/*
 * Request state machine, run in tasklet context.  Advances the current
 * request through SENDING_CMD -> SENDING_DATA -> DATA_BUSY ->
 * SENDING_STOP (or DATA_ERROR) based on the event bits set by the
 * interrupt handler, looping until the state stops changing.
 */
1256 static void dw_mci_tasklet_func(unsigned long priv)
1258 struct dw_mci *host = (struct dw_mci *)priv;
1259 struct mmc_data *data;
1260 struct mmc_command *cmd;
1261 struct mmc_request *mrq;
1262 enum dw_mci_state state;
1263 enum dw_mci_state prev_state;
1266 spin_lock(&host->lock);
1268 state = host->state;
/* command phase: wait for CMD_COMPLETE from the IRQ handler */
1279 case STATE_SENDING_CMD:
1280 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1281 &host->pending_events))
1286 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1287 err = dw_mci_command_complete(host, cmd);
/* a successful SBC is followed by the real data command */
1288 if (cmd == mrq->sbc && !err) {
1289 prev_state = state = STATE_SENDING_CMD;
1290 __dw_mci_start_request(host, host->cur_slot,
/* command error on a data command: abort the data phase */
1295 if (cmd->data && err) {
1296 dw_mci_stop_dma(host);
1297 send_stop_abort(host, data);
1298 state = STATE_SENDING_STOP;
1302 if (!cmd->data || err) {
1303 dw_mci_request_end(host, mrq);
1307 prev_state = state = STATE_SENDING_DATA;
/* data phase: watch for data errors, then transfer completion */
1310 case STATE_SENDING_DATA:
1311 if (test_and_clear_bit(EVENT_DATA_ERROR,
1312 &host->pending_events)) {
1313 dw_mci_stop_dma(host);
1314 send_stop_abort(host, data);
1315 state = STATE_DATA_ERROR;
1319 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1320 &host->pending_events))
1323 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1324 prev_state = state = STATE_DATA_BUSY;
/* wait for the card to leave busy, then harvest the data phase */
1327 case STATE_DATA_BUSY:
1328 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1329 &host->pending_events))
1333 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1334 err = dw_mci_data_complete(host, data);
/* pre-defined (SBC) transfers need no explicit stop command */
1337 if (!data->stop || mrq->sbc) {
1339 data->stop->error = 0;
1340 dw_mci_request_end(host, mrq);
1344 /* stop command for open-ended transfer*/
1346 send_stop_abort(host, data);
1350 * If err has non-zero,
1351 * stop-abort command has been already issued.
1353 prev_state = state = STATE_SENDING_STOP;
/* stop phase: wait for the stop command to complete */
1357 case STATE_SENDING_STOP:
1358 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1359 &host->pending_events))
1362 /* CMD error in data command */
1363 if (mrq->cmd->error && mrq->data)
1364 dw_mci_fifo_reset(host);
1370 dw_mci_command_complete(host, mrq->stop);
1372 host->cmd_status = 0;
1374 dw_mci_request_end(host, mrq);
/* after a data error, wait for the transfer to wind down */
1377 case STATE_DATA_ERROR:
1378 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1379 &host->pending_events))
1382 state = STATE_DATA_BUSY;
1385 } while (state != prev_state);
1387 host->state = state;
1389 spin_unlock(&host->lock);
1393 /* push final bytes to part_buf, only use during push */
1394 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1396 memcpy((void *)&host->part_buf, buf, cnt);
1397 host->part_buf_count = cnt;
1400 /* append bytes to part_buf, only use during push */
1401 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1403 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1404 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1405 host->part_buf_count += cnt;
/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	/* never hand out more than is buffered */
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	/* number of bytes actually delivered to @buf */
	return cnt;
}
1422 /* pull final bytes from the part_buf, assuming it's just been filled */
1423 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1425 memcpy(buf, &host->part_buf, cnt);
1426 host->part_buf_start = cnt;
1427 host->part_buf_count = (1 << host->data_shift) - cnt;
/* Push @cnt bytes from @buf into the FIFO using 16-bit accesses. */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;	/* original count, for the end-of-transfer test below */

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		/* bounce misaligned data through an aligned buffer */
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		 /* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}
/* Pull @cnt bytes from the FIFO into @buf using 16-bit accesses. */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* odd trailing byte: read one word, keep remainder buffered */
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
/* Push @cnt bytes from @buf into the FIFO using 32-bit accesses. */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;	/* original count, for the end-of-transfer test below */

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		/* bounce misaligned data through an aligned buffer */
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}
/* Pull @cnt bytes from the FIFO into @buf using 32-bit accesses. */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* trailing sub-word bytes: read one word, keep remainder buffered */
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
/* Push @cnt bytes from @buf into the FIFO using 64-bit accesses. */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;	/* original count, for the end-of-transfer test below */

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		/* bounce misaligned data through an aligned buffer */
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}
/* Pull @cnt bytes from the FIFO into @buf using 64-bit accesses. */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* trailing sub-word bytes: read one word, keep remainder buffered */
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
/* Satisfy @cnt bytes first from the partial buffer, then from the FIFO. */
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}
/*
 * PIO read path: drain the controller FIFO into host->sg via the
 * sg_mapping_iter.  @dto is true when called from the DATA_OVER interrupt,
 * in which case reading continues while the FIFO still reports content.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* bytes available = FIFO word count + buffered partial bytes */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();	/* order host->sg/state writes before signalling completion */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
/*
 * PIO write path: fill the controller FIFO from host->sg via the
 * sg_mapping_iter until the TXDR (transmit data request) clears.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free space = (depth - resident words) minus buffered bytes */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();	/* order host->sg/state writes before signalling completion */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
/* Record a command-done status and kick the state-machine tasklet. */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	/* keep the first status; don't let a later IRQ overwrite it */
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb();	/* publish cmd_status before the completion bit */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
/*
 * Top-half interrupt handler: acknowledge each pending source in RINTSTS,
 * latch status for the tasklet, and service PIO/SDIO/card-detect events.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();	/* publish status before the event bit */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();	/* publish status before the event bit */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();	/* publish status before the event bit */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* final drain of any data left in the FIFO */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			/* card (un)plug handling sleeps, so defer to a workqueue */
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}
	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
/*
 * Card-detect work: for each slot, reconcile the detected card presence
 * with the last known state, failing any in-flight request with
 * -ENOMEDIUM when the card went away, then notify the MMC core.
 */
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;

		present = dw_mci_get_cd(mmc);
		/* loop: presence may change again while we process an event */
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Mark card as present if applicable */
			if (present != 0)
				set_bit(DW_MMC_CARD_PRESENT, &slot->flags);

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						/* fall through */
					case STATE_SENDING_STOP:
						if (mrq->stop)
							mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					/* request was still queued, not active */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0) {
				clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

				/* Clear down the FIFO */
				dw_mci_fifo_reset(host);
#ifdef CONFIG_MMC_DW_IDMAC
				dw_mci_idmac_reset(host);
#endif

			}

			spin_unlock_bh(&host->lock);

			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
/* given a slot id, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	struct device_node *np;
	const __be32 *addr;
	int len;

	if (!dev || !dev->of_node)
		return NULL;

	/* match the child node whose "reg" property equals the slot id */
	for_each_child_of_node(dev->of_node, np) {
		addr = of_get_property(np, "reg", &len);
		if (!addr || (len < sizeof(int)))
			continue;
		if (be32_to_cpup(addr) == slot)
			return np;
	}
	return NULL;
}
/* Per-slot DT quirk property names mapped to driver quirk flags. */
static struct dw_mci_of_slot_quirks {
	char *quirk;
	int id;
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};
/* Collect the quirk flags declared in the DT node for @slot. */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int quirks = 0;
	int idx;

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
			quirks |= of_slot_quirks[idx].id;

	return quirks;
}
/* find out bus-width for a given slot */
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	u32 bus_wd = 1;	/* default to 1-bit when the property is absent */

	if (!np)
		return 1;

	if (of_property_read_u32(np, "bus-width", &bus_wd))
		dev_err(dev, "bus-width property not found, assuming width"
			       " as 1\n");

	return bus_wd;
}
/* find the write protect gpio for a given slot; or -1 if none specified */
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int gpio;

	if (!np)
		return -EINVAL;

	gpio = of_get_named_gpio(np, "wp-gpios", 0);

	/* Having a missing entry is valid; return silently */
	if (!gpio_is_valid(gpio))
		return -EINVAL;

	/* devm-managed: released automatically on driver detach */
	if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
		dev_warn(dev, "gpio [%d] request failed\n", gpio);
		return -EINVAL;
	}

	return gpio;
}
#else /* CONFIG_OF */
/* Non-DT builds: neutral defaults so callers need no #ifdefs. */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
	return 1;
}
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	return -EINVAL;
}
#endif /* CONFIG_OF */
/*
 * Allocate and register one mmc_host for slot @id: wire up ops, clock
 * limits, OCR mask, capabilities (pdata + per-controller drv_data + DT
 * bus width), block-layer limits, and initial card-present state.
 */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];
	u8 bus_width;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);

	mmc->ops = &dw_mci_ops;
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		/* no DT range given: fall back to driver-wide limits */
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	if (host->pdata->get_ocr)
		mmc->ocr_avail = host->pdata->get_ocr(id);
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * Start with slot power disabled, it will be enabled when a card
	 * is detected.
	 */
	if (host->pdata->setpower)
		host->pdata->setpower(id, 0);

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		/* controller index comes from the "mshc" alias in DT */
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	if (host->pdata->get_bus_wd)
		bus_width = host->pdata->get_bus_wd(slot->id);
	else if (host->dev->of_node)
		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
	else
		bus_width = 1;

	switch (bus_width) {
	case 8:
		mmc->caps |= MMC_CAP_8_BIT_DATA;
		/* fall through: an 8-bit slot can also run 4-bit */
	case 4:
		mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_blk_count = host->ring_size;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
	}

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_setup_bus;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	/* Card initially undetected */
	slot->last_detect_state = 0;

	return 0;

err_setup_bus:
	mmc_free_host(mmc);
	return -EINVAL;
}
/* Unregister slot @id from the MMC core and free its mmc_host. */
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Shutdown detect IRQ */
	if (slot->host->pdata->exit)
		slot->host->pdata->exit(id);

	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
/*
 * Pick and initialize a DMA backend; fall back to PIO on any failure.
 * Sets host->use_dma accordingly.
 */
static void dw_mci_init_dma(struct dw_mci *host)
{
	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					  &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	/* all four ops must be present for DMA to be usable */
	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize "
				"DMA Controller.\n", __func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}
/*
 * Assert the @reset bits in CTRL and poll until the controller clears
 * them (up to 500 ms).  Returns true on success, false on timeout.
 */
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & reset))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(host->dev,
		"Timeout resetting block (ctrl reset %#x)\n",
		ctrl & reset);

	return false;
}
/* Reset the data FIFO, stopping any in-progress scatter-gather walk first. */
static inline bool dw_mci_fifo_reset(struct dw_mci *host)
{
	/*
	 * Reseting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
}
/* Reset controller, FIFO and DMA blocks in one shot. */
static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
{
	return dw_mci_ctrl_reset(host,
				 SDMMC_CTRL_FIFO_RESET |
				 SDMMC_CTRL_RESET |
				 SDMMC_CTRL_DMA_RESET);
}
/* Host-level DT quirk property names mapped to driver quirk flags. */
static struct dw_mci_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	},
};
/*
 * Build a dw_mci_board from device-tree properties (slot count, quirks,
 * FIFO depth, detect delay, bus clock, caps), letting the per-SoC
 * drv_data hook parse its own extras.  Returns ERR_PTR on failure.
 */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				&pdata->num_slots)) {
		dev_info(dev, "num-slots property not found, "
				"assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev, "fifo-depth property not found, using "
				"value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	/* give the SoC-specific glue a chance to parse its own properties */
	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "keep-power-in-suspend", NULL))
		pdata->pm_caps |= MMC_PM_KEEP_POWER;

	if (of_find_property(np, "enable-sdio-wakeup", NULL))
		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
		pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;

	if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
		pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;

	return pdata;
}
#else /* CONFIG_OF */
/* Without CONFIG_OF there is no DT to parse; callers must supply pdata. */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
2402 int dw_mci_probe(struct dw_mci *host)
2404 const struct dw_mci_drv_data *drv_data = host->drv_data;
2405 int width, i, ret = 0;
2410 host->pdata = dw_mci_parse_dt(host);
2411 if (IS_ERR(host->pdata)) {
2412 dev_err(host->dev, "platform data not available\n");
2417 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
2419 "Platform data must supply select_slot function\n");
2423 host->biu_clk = devm_clk_get(host->dev, "biu");
2424 if (IS_ERR(host->biu_clk)) {
2425 dev_dbg(host->dev, "biu clock not available\n");
2427 ret = clk_prepare_enable(host->biu_clk);
2429 dev_err(host->dev, "failed to enable biu clock\n");
2434 host->ciu_clk = devm_clk_get(host->dev, "ciu");
2435 if (IS_ERR(host->ciu_clk)) {
2436 dev_dbg(host->dev, "ciu clock not available\n");
2437 host->bus_hz = host->pdata->bus_hz;
2439 ret = clk_prepare_enable(host->ciu_clk);
2441 dev_err(host->dev, "failed to enable ciu clock\n");
2445 if (host->pdata->bus_hz) {
2446 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2449 "Unable to set bus rate to %ul\n",
2450 host->pdata->bus_hz);
2452 host->bus_hz = clk_get_rate(host->ciu_clk);
2455 if (drv_data && drv_data->init) {
2456 ret = drv_data->init(host);
2459 "implementation specific init failed\n");
2464 if (drv_data && drv_data->setup_clock) {
2465 ret = drv_data->setup_clock(host);
2468 "implementation specific clock setup failed\n");
2473 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
2474 if (IS_ERR(host->vmmc)) {
2475 ret = PTR_ERR(host->vmmc);
2476 if (ret == -EPROBE_DEFER)
2479 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2482 ret = regulator_enable(host->vmmc);
2484 if (ret != -EPROBE_DEFER)
2486 "regulator_enable fail: %d\n", ret);
2491 if (!host->bus_hz) {
2493 "Platform data must supply bus speed\n");
2498 host->quirks = host->pdata->quirks;
2500 spin_lock_init(&host->lock);
2501 INIT_LIST_HEAD(&host->queue);
2504 * Get the host data width - this assumes that HCON has been set with
2505 * the correct values.
2507 i = (mci_readl(host, HCON) >> 7) & 0x7;
2509 host->push_data = dw_mci_push_data16;
2510 host->pull_data = dw_mci_pull_data16;
2512 host->data_shift = 1;
2513 } else if (i == 2) {
2514 host->push_data = dw_mci_push_data64;
2515 host->pull_data = dw_mci_pull_data64;
2517 host->data_shift = 3;
2519 /* Check for a reserved value, and warn if it is */
2521 "HCON reports a reserved host data width!\n"
2522 "Defaulting to 32-bit access.\n");
2523 host->push_data = dw_mci_push_data32;
2524 host->pull_data = dw_mci_pull_data32;
2526 host->data_shift = 2;
2529 /* Reset all blocks */
2530 if (!dw_mci_ctrl_all_reset(host))
2533 host->dma_ops = host->pdata->dma_ops;
2534 dw_mci_init_dma(host);
2536 /* Clear the interrupts for the host controller */
2537 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2538 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2540 /* Put in max timeout */
2541 mci_writel(host, TMOUT, 0xFFFFFFFF);
2544 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2545 * Tx Mark = fifo_size / 2 DMA Size = 8
2547 if (!host->pdata->fifo_depth) {
2549 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2550 * have been overwritten by the bootloader, just like we're
2551 * about to do, so if you know the value for your hardware, you
2552 * should put it in the platform data.
2554 fifo_size = mci_readl(host, FIFOTH);
2555 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2557 fifo_size = host->pdata->fifo_depth;
2559 host->fifo_depth = fifo_size;
2561 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2562 mci_writel(host, FIFOTH, host->fifoth_val);
2564 /* disable clock to CIU */
2565 mci_writel(host, CLKENA, 0);
2566 mci_writel(host, CLKSRC, 0);
2569 * In 2.40a spec, Data offset is changed.
2570 * Need to check the version-id and set data-offset for DATA register.
2572 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2573 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2575 if (host->verid < DW_MMC_240A)
2576 host->data_offset = DATA_OFFSET;
2578 host->data_offset = DATA_240A_OFFSET;
2580 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2581 host->card_workqueue = alloc_workqueue("dw-mci-card",
2582 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
2583 if (!host->card_workqueue) {
2587 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2588 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2589 host->irq_flags, "dw-mci", host);
2593 if (host->pdata->num_slots)
2594 host->num_slots = host->pdata->num_slots;
2596 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2599 * Enable interrupts for command done, data over, data empty, card det,
2600 * receive ready and error such as transmit, receive timeout, crc error
2602 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2603 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2604 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2605 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2606 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2608 dev_info(host->dev, "DW MMC controller at irq %d, "
2609 "%d bit host data width, "
2611 host->irq, width, fifo_size);
2613 /* We need at least one slot to succeed */
2614 for (i = 0; i < host->num_slots; i++) {
2615 ret = dw_mci_init_slot(host, i);
2617 dev_dbg(host->dev, "slot %d init failed\n", i);
2623 dev_info(host->dev, "%d slots initialized\n", init_slots);
2625 dev_dbg(host->dev, "attempted to initialize %d slots, "
2626 "but failed on all\n", host->num_slots);
2630 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2631 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2636 destroy_workqueue(host->card_workqueue);
2639 if (host->use_dma && host->dma_ops->exit)
2640 host->dma_ops->exit(host);
2644 regulator_disable(host->vmmc);
2647 if (!IS_ERR(host->ciu_clk))
2648 clk_disable_unprepare(host->ciu_clk);
2651 if (!IS_ERR(host->biu_clk))
2652 clk_disable_unprepare(host->biu_clk);
2656 EXPORT_SYMBOL(dw_mci_probe);
/*
 * Tear down the controller: mask interrupts, unregister each slot,
 * gate the CIU clock, and release workqueue/DMA/regulator/clock
 * resources acquired in probe.
 */
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	destroy_workqueue(host->card_workqueue);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (host->vmmc)
		regulator_disable(host->vmmc);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
2693 #ifdef CONFIG_PM_SLEEP
2695 * TODO: we should probably disable the clock to the card in the suspend path.
/* System suspend: drop card power if we own a vmmc regulator. */
int dw_mci_suspend(struct dw_mci *host)
{
	if (host->vmmc)
		regulator_disable(host->vmmc);

	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);
/*
 * System resume: re-enable vmmc, reset the controller, re-init DMA,
 * restore FIFO thresholds and interrupt masks, then re-program any slot
 * that kept power across suspend (MMC_PM_KEEP_POWER).
 */
int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (host->vmmc) {
		ret = regulator_enable(host->vmmc);
		if (ret) {
			dev_err(host->dev,
				"failed to enable regulator: %d\n", ret);
			return ret;
		}
	}

	if (!dw_mci_ctrl_all_reset(host)) {
		ret = -ENODEV;
		return ret;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			/* slot kept power: restore ios and bus clocking */
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}
	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
2755 #endif /* CONFIG_PM_SLEEP */
/* Module init: only prints the banner; bus glue registers separately. */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}
2763 static void __exit dw_mci_exit(void)
2767 module_init(dw_mci_init);
2768 module_exit(dw_mci_exit);
2770 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2771 MODULE_AUTHOR("NXP Semiconductor VietNam");
2772 MODULE_AUTHOR("Imagination Technologies Ltd");
2773 MODULE_LICENSE("GPL v2");