/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)
#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
	defined(CONFIG_MMC_SDHCI_MODULE))
#define SDHCI_USE_LEDS_CLASS
#endif
#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks;
static unsigned int debug_quirks2;
static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data,
				  struct sdhci_host_next *next);
static int sdhci_do_get_cd(struct sdhci_host *host);
#ifdef CONFIG_PM
static int sdhci_runtime_pm_get(struct sdhci_host *host);
static int sdhci_runtime_pm_put(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
#else
static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return 0;
}
static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	return 0;
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
}
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
}
#endif
static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
		mmc_hostname(host->mmc));

	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
		sdhci_readl(host, SDHCI_DMA_ADDRESS),
		sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
		sdhci_readw(host, SDHCI_BLOCK_SIZE),
		sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		sdhci_readl(host, SDHCI_ARGUMENT),
		sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_debug(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
		sdhci_readl(host, SDHCI_PRESENT_STATE),
		sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_debug(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
		sdhci_readb(host, SDHCI_POWER_CONTROL),
		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_debug(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_debug(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		sdhci_readl(host, SDHCI_INT_STATUS));
	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		sdhci_readl(host, SDHCI_INT_ENABLE),
		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		sdhci_readw(host, SDHCI_ACMD12_ERR),
		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_debug(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
		sdhci_readl(host, SDHCI_CAPABILITIES),
		sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_debug(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
		sdhci_readw(host, SDHCI_COMMAND),
		sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
		sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
		else
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
	}

	pr_debug(DRIVER_NAME ": ===========================================\n");
}
/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many things */
		host->preset_enabled = false;
	}
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
static void sdhci_init(struct sdhci_host *host, int soft)
{
	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		sdhci_set_ios(host->mmc, &host->mmc->ios);
	}
}
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}
static void sdhci_activate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_deactivate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
#ifdef SDHCI_USE_LEDS_CLASS
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		sdhci_deactivate_led(host);
	else
		sdhci_activate_led(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}
#endif
/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));
		len = min(host->sg_miter.length, blksize);
		blksize -= len;
		host->sg_miter.consumed = len;
		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}
			*buf = scratch & 0xFF;
			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);
	local_irq_restore(flags);
}
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));
		len = min(host->sg_miter.length, blksize);
		blksize -= len;
		host->sg_miter.consumed = len;
		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);
			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);
	local_irq_restore(flags);
}
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
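
/*
 * Worked example for the PIO path above (illustrative, not from the
 * original source): a 512-byte block is drained with 128 accesses to the
 * 32-bit SDHCI_BUFFER data port. sdhci_read_block_pio() unpacks each
 * word least-significant byte first (scratch & 0xFF, then scratch >>= 8),
 * and sdhci_write_block_pio() packs bytes in the same order, so the byte
 * stream seen by the card is independent of host CPU endianness.
 */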
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}
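
/*
 * Illustrative use of the helper above (values assumed, not taken from
 * the original source): describing a valid 512-byte transfer at bus
 * address 0x12345678,
 *
 *	sdhci_adma_write_desc(host, desc, 0x12345678, 512, ADMA2_TRAN_VALID);
 *
 * stores cmd = cpu_to_le16(0x0021) (Act = Tran, Valid), len =
 * cpu_to_le16(0x0200) and addr_lo = cpu_to_le32(0x12345678), matching
 * the ADMA2 descriptor layout defined by the SDHCI specification.
 */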
static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
static int sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;
	void *desc;
	void *align;
	dma_addr_t addr;
	dma_addr_t align_addr;
	int len, offset;
	struct scatterlist *sg;
	int i;
	char *buffer;
	unsigned long flags;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	host->align_addr = dma_map_single(mmc_dev(host->mmc),
		host->align_buffer, host->align_buffer_sz, direction);
	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
		goto fail;
	BUG_ON(host->align_addr & host->align_mask);

	host->sg_count = sdhci_pre_dma_transfer(host, data, NULL);
	if (host->sg_count < 0)
		goto unmap_align;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA
		 * addresses must be 32-bit aligned. If they
		 * aren't, then we use a bounce buffer for
		 * the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (host->align_sz - (addr & host->align_mask)) &
			 host->align_mask;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += host->align_sz;
			align_addr += host->align_sz;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* tran, valid */
		sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
		desc += host->desc_sz;

		/*
		 * If this triggers then we have a calculation bug
		 * in this function.
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}

	/*
	 * Resync align buffer as we might have changed it.
	 */
	if (data->flags & MMC_DATA_WRITE) {
		dma_sync_single_for_device(mmc_dev(host->mmc),
			host->align_addr, host->align_buffer_sz, direction);
	}

	return 0;

unmap_align:
	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		host->align_buffer_sz, direction);
fail:
	return -EINVAL;
}
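
/*
 * Worked example of the alignment fix-up in sdhci_adma_table_pre()
 * (numbers assumed for illustration): for an sg entry mapped at bus
 * address 0x1001 with align_sz = 4, offset = (4 - (0x1001 & 3)) & 3 = 3,
 * so one descriptor sends the first three bytes via the bounce buffer
 * and the next descriptor continues at the now aligned address 0x1004.
 * For reads, sdhci_adma_table_post() copies those bytes back into the
 * scatterlist after the DMA completes.
 */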
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;
	bool has_unaligned;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		host->align_buffer_sz, direction);

	/* Do a quick scan of the SG list for any unaligned mappings */
	has_unaligned = false;
	for_each_sg(data->sg, sg, host->sg_count, i)
		if (sg_dma_address(sg) & host->align_mask) {
			has_unaligned = true;
			break;
		}

	if (has_unaligned && data->flags & MMC_DATA_READ) {
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & host->align_mask) {
				size = host->align_sz -
				       (sg_dma_address(sg) & host->align_mask);

				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				align += host->align_sz;
			}
		}
	}

	if (!data->host_cookie)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);
}
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = data->timeout_ns / 1000;
		if (host->clock)
			target_timeout += data->timeout_clks / host->clock;
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}
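
/*
 * Worked example for the loop above (illustrative): with
 * host->timeout_clk = 50000 (kHz), the minimum timeout is
 * 2^13 * 1000 / 50000 ~= 163 us. For a target of 500000 us the value is
 * doubled twelve times (163 us * 2^12 ~= 668 ms >= 500 ms), so
 * count = 12 ends up in SDHCI_TIMEOUT_CONTROL.
 */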
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;
	int ret;

	WARN_ON(host->data);

	if (data || (cmd->flags & MMC_RSP_BUSY))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
		host->flags |= SDHCI_REQ_USE_DMA;

	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * As we use 3 byte chunks to work around
			 * alignment problems, we need to check this
			 * quirk.
			 */
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				sdhci_writel(host, host->adma_addr,
					SDHCI_ADMA_ADDRESS);
				if (host->flags & SDHCI_USE_64_BIT_DMA)
					sdhci_writel(host,
						(u64)host->adma_addr >> 32,
						SDHCI_ADMA_ADDRESS_HI);
			}
		} else {
			int sg_cnt;

			sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
			if (sg_cnt <= 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				WARN_ON(sg_cnt != 1);
				sdhci_writel(host, sg_dma_address(data->sg),
					SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
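
/*
 * Example of the block size encoding above (illustrative): with the
 * default 512K SDMA boundary (SDHCI_DEFAULT_BOUNDARY_ARG = 7) and a
 * 512-byte block, SDHCI_MAKE_BLKSZ(7, 512) = (7 << 12) | 512 = 0x7200
 * is the value written to SDHCI_BLOCK_SIZE.
 */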
static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
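
/*
 * Example of the resulting mode word (illustrative, using the
 * SDHCI_TRNS_* values from sdhci.h): a multi-block DMA read issued
 * under CMD23 sets DMA (0x01) | BLK_CNT_EN (0x02) | AUTO_CMD23 (0x08) |
 * READ (0x10) | MULTI (0x20), i.e. mode = 0x3b.
 */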
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA)
			sdhci_adma_table_post(host, data);
		else {
			if (!data->host_cookie)
				dma_unmap_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}
	}

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	mod_timer(&host->timer, timeout);

	host->cmd = cmd;
	host->busy_handle = 0;

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
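
/*
 * Example of the command register encoding (illustrative):
 * SDHCI_MAKE_CMD() places the opcode in bits 13:8 above the flag byte,
 * so CMD17 (READ_SINGLE_BLOCK) with a CRC- and index-checked short
 * response plus data present becomes (17 << 8) | 0x3a = 0x113a.
 */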
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0; i < 4; i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	/* Finished CMD23, now send actual command. */
	if (host->cmd == host->mrq->sbc) {
		host->cmd = NULL;
		sdhci_send_command(host, host->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!host->cmd->data)
			tasklet_schedule(&host->finish_tasklet);

		host->cmd = NULL;
	}
}
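
/*
 * Worked example of the response reassembly above (illustrative): for a
 * 136-bit R2 response the controller strips the CRC byte, so with
 * SDHCI_RESPONSE at offset 0x10, resp[0] is built from the 32-bit word
 * at 0x1C shifted left by 8, ORed with the byte at 0x1B, and so on down
 * to resp[3], which has no lower neighbour and keeps its low byte zero.
 */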
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	unsigned long timeout;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			real_div = div;
			clk_mul = host->clk_mul;
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
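
/*
 * Worked divisor example for sdhci_set_clock() (numbers assumed): with
 * host->max_clk = 200 MHz, no programmable clock multiplier and a
 * 50 MHz request on a v3.00 host, the even-divisor scan stops at
 * div = 4 (200 / 4 = 50), real_div = 4 gives actual_clock = 50 MHz,
 * and div >> 1 = 2 is the value placed in the 10-bit divider field.
 */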
static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
			    unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	u8 pwr = 0;

	if (!IS_ERR(mmc->supply.vmmc)) {
		spin_unlock_irq(&host->lock);
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		spin_lock_irq(&host->lock);

		if (mode != MMC_POWER_OFF)
			sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
		else
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		return;
	}

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			BUG();
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
		vdd = 0;
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before they
		 * can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);

	/* Firstly check card presence */
	present = sdhci_do_get_cd(host);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq = mrq;

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
	unsigned long flags;
	u8 ctrl;
	struct mmc_host *mmc = host->mmc;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if ((ios->timing == MMC_TIMING_MMC_HS400) ||
		    (ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_MMC_DDR52) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
			ctrl |= SDHCI_CTRL_HISPD;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);
	sdhci_do_set_ios(host, ios);
	sdhci_runtime_pm_put(host);
}
static int sdhci_do_get_cd(struct sdhci_host *host)
{
	int gpio_cd = mmc_gpio_get_cd(host->mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If polling/nonremovable, assume that the card is always present. */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return 1;

	/* Try slot gpio detect */
	if (!IS_ERR_VALUE(gpio_cd))
		return !!gpio_cd;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_cd(host);
	sdhci_runtime_pm_put(host);
	return ret;
}
static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}
#define SAMPLE_COUNT	5

static int sdhci_do_get_ro(struct sdhci_host *host)
{
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		} else
			ro_count = 0;
		msleep(30);
	}
	return 0;
}
static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}
static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_ro(host);
	sdhci_runtime_pm_put(host);
	return ret;
}
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	sdhci_runtime_pm_get(host);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	sdhci_runtime_pm_put(host);
}
static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
						struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
						    3600000);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc,
					1700000, 1950000);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
						    1300000);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
	struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err;

	if (host->version < SDHCI_SPEC_300)
		return 0;
	sdhci_runtime_pm_get(host);
	err = sdhci_do_start_signal_voltage_switch(host, ios);
	sdhci_runtime_pm_put(host);
	return err;
}
static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	sdhci_runtime_pm_get(host);
	/* Check whether DAT[3:0] is 0000 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
	sdhci_runtime_pm_put(host);

	return !(present_state & SDHCI_DATA_LVL_MASK);
}
static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int tuning_loop_counter = MAX_TUNING_LOOP;
	int err = 0;
	unsigned long flags;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	sdhci_runtime_pm_get(host);
	spin_lock_irqsave(&host->lock, flags);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	host->flags &= ~SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning only in case of SDR104 mode
	 * and for SDR50 mode when Use Tuning for SDR50 is set in the
	 * Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out_unlock;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
		    host->flags & SDHCI_SDR104_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out_unlock;
	}

	if (host->ops->platform_execute_tuning) {
		spin_unlock_irqrestore(&host->lock, flags);
		err = host->ops->platform_execute_tuning(host, opcode);
		sdhci_runtime_pm_put(host);
		return err;
	}

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

	/*
	 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times or a timeout of 150ms occurs.
	 */
	do {
		struct mmc_command cmd = {0};
		struct mmc_request mrq = {NULL};

		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		cmd.retries = 0;
		cmd.data = NULL;
		cmd.error = 0;

		if (tuning_loop_counter-- == 0)
			break;

		mrq.cmd = &cmd;
		host->mrq = &mrq;

		/*
		 * In response to CMD19, the card sends 64 bytes of tuning
		 * block to the Host Controller. So we set the block size
		 * to 64 here.
		 */
		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
					     SDHCI_BLOCK_SIZE);
			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
					     SDHCI_BLOCK_SIZE);
		} else {
			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
				     SDHCI_BLOCK_SIZE);
		}

		/*
		 * The tuning block is sent by the card to the host controller.
		 * So we set the TRNS_READ bit in the Transfer Mode register.
		 * This also takes care of setting DMA Enable and Multi Block
		 * Select in the same register to 0.
		 */
		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

		sdhci_send_command(host, &cmd);

		host->cmd = NULL;
		host->mrq = NULL;

		spin_unlock_irqrestore(&host->lock, flags);
		/* Wait for Buffer Read Ready interrupt */
		wait_event_interruptible_timeout(host->buf_ready_int,
					(host->tuning_done == 1),
					msecs_to_jiffies(50));
		spin_lock_irqsave(&host->lock, flags);

		if (!host->tuning_done) {
			pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

			err = -EIO;
			goto out;
		}

		host->tuning_done = 0;

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			mdelay(1);
	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);

	/*
	 * The Host Driver has exhausted the maximum number of loops allowed,
	 * so use fixed sampling frequency.
	 */
	if (tuning_loop_counter < 0) {
		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
	}
	if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
		pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
		err = -EIO;
	}

out:
	if (tuning_count) {
		/*
		 * In case tuning fails, host controllers which support
		 * re-tuning can try tuning again at a later time, when the
		 * re-tuning timer expires. So for these controllers, we
		 * return 0. Since there might be other controllers who do not
		 * have this capability, we return error for them.
		 */
		err = 0;
	}

	host->mmc->retune_period = err ? 0 : tuning_count;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
	spin_unlock_irqrestore(&host->lock, flags);
	sdhci_runtime_pm_put(host);

	return err;
}
static int sdhci_select_drive_strength(struct mmc_card *card,
				       unsigned int max_dtr, int host_drv,
				       int card_drv, int *drv_type)
{
	struct sdhci_host *host = mmc_priv(card->host);

	if (!host->ops->select_drive_strength)
		return 0;

	return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
						card_drv, drv_type);
}
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (data->host_cookie)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
					 data->flags & MMC_DATA_WRITE ?
					 DMA_TO_DEVICE : DMA_FROM_DEVICE);
		mrq->data->host_cookie = 0;
	}
}
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data,
				  struct sdhci_host_next *next)
{
	int sg_count;

	if (!next && data->host_cookie &&
	    data->host_cookie != host->next_data.cookie) {
		pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n",
			__func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	/* Check if next job is already prepared */
	if (next ||
	    (!next && data->host_cookie != host->next_data.cookie)) {
		sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg,
				      data->sg_len,
				      data->flags & MMC_DATA_WRITE ?
				      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	} else {
		sg_count = host->next_data.sg_count;
		host->next_data.sg_count = 0;
	}

	if (sg_count == 0)
		return -EINVAL;

	if (next) {
		next->sg_count = sg_count;
		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
	} else
		host->sg_count = sg_count;

	return sg_count;
}
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool is_first_req)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (mrq->data->host_cookie) {
		mrq->data->host_cookie = 0;
		return;
	}

	if (host->flags & SDHCI_REQ_USE_DMA)
		if (sdhci_pre_dma_transfer(host,
					mrq->data,
					&host->next_data) < 0)
			mrq->data->host_cookie = 0;
}
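
/*
 * Sketch of the asynchronous request flow implemented above (a summary,
 * assuming the standard mmc core behaviour): while one request is in
 * flight the core calls .pre_req() on the next one, which maps its
 * scatterlist and tags it with a non-zero cookie in data->host_cookie.
 * When that request is later issued, sdhci_pre_dma_transfer() sees the
 * cookie match host->next_data.cookie and reuses the mapping instead of
 * calling dma_map_sg() again; .post_req() finally unmaps it.
 */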
static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = sdhci_do_get_cd(host);

	spin_lock_irqsave(&host->lock, flags);

	/* Check host->mrq first in case we are runtime suspended */
	if (host->mrq && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.select_drive_strength		= sdhci_select_drive_strength,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};
/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host *)param;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If this tasklet gets rescheduled while running, it will
	 * be run again afterwards but without any active request.
	 */
	if (!host->mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
	    ((mrq->cmd && mrq->cmd->error) ||
	     (mrq->sbc && mrq->sbc->error) ||
	     (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
			    (mrq->data->stop && mrq->data->stop->error))) ||
	     (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
	sdhci_runtime_pm_put(host);
}
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
	BUG_ON(intmask == 0);

	if (!host->cmd) {
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	if (host->cmd->error) {
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
				&& !host->busy_handle) {
			/* Mark that command complete before busy is ended */
			host->busy_handle = 1;
			return;
		}

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	} else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
		   host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
		*mask &= ~SDHCI_INT_DATA_END;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
#ifdef CONFIG_MMC_DEBUG
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	BUG_ON(intmask == 0);

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->cmd->error = -ETIMEDOUT;
				tasklet_schedule(&host->finish_tasklet);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				/*
				 * Some cards handle the busy-end interrupt
				 * before the command has completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->busy_handle)
					sdhci_finish_command(host);
				else
					host->busy_handle = 1;
				return;
			}
		}

		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;

			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
			    mmc_hostname(host->mmc), dmastart,
			    host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}
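		/*
		 * Worked example of the boundary arithmetic above, assuming
		 * the default 512 KiB (0x80000-byte) SDMA boundary from
		 * sdhci.h: with dmastart = 0x10010000 and bytes_xfered =
		 * 0x30000, dmanow is first 0x10040000; masking down to the
		 * boundary and adding one boundary size yields 0x10080000,
		 * so the transfer resumes at the next 512 KiB boundary and
		 * bytes_xfered becomes 0x70000.
		 */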
		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		DBG("*** %s got interrupt: 0x%08x\n",
		    mmc_hostname(host->mmc), intmask);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is an observation on i.MX eSDHC: the INSERT
			 * bit is immediately set again when it gets cleared,
			 * if a card is inserted. We have to mask the irq to
			 * prevent an interrupt storm which would freeze the
			 * system, and the REMOVE bit behaves the same way.
			 * So enable only the interrupt for the state the
			 * card is not currently in.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
				      &intmask);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
			       mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_CARD_INT) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}

		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		sdhci_card_event(host->mmc);
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}
/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM

void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask;
	/* Avoid fake wake-ups */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);

static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(sdhci_suspend_host);

int sdhci_resume_host(struct sdhci_host *host)
{
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		sdhci_do_set_ios(host, &host->mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
		mmiowb();
	}

	sdhci_enable_card_detection(host);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_resume_host);
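/*
 * Illustrative only, not part of this driver: a glue driver would
 * typically wire the two helpers above into its dev_pm_ops. The "foo"
 * names below are hypothetical; a minimal sketch might look like:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_suspend_host(host);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_resume_host(host);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */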
static int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return pm_runtime_get_sync(host->mmc->parent);
}

static int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	pm_runtime_mark_last_busy(host->mmc->parent);
	return pm_runtime_put_autosuspend(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->runtime_suspended || host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (host->runtime_suspended || !host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	/* Force clock and power re-program */
	host->pwr = 0;
	host->clock = 0;
	sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
	sdhci_do_set_ios(host, &host->mmc->ios);

	if ((host_flags & SDHCI_PV_ENABLED) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
		spin_lock_irqsave(&host->lock, flags);
		sdhci_enable_preset_value(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
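/*
 * Illustrative only, not part of this driver: a glue driver that wants
 * runtime PM would typically enable it after a successful probe, roughly
 * as in the hypothetical sketch below, and call the two helpers above
 * from its runtime_suspend/runtime_resume callbacks:
 *
 *	pm_runtime_set_active(&pdev->dev);
 *	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
 *	pm_runtime_use_autosuspend(&pdev->dev);
 *	pm_runtime_enable(&pdev->dev);
 */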
#endif /* CONFIG_PM */
/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/

struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);
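/*
 * Illustrative only, not part of this driver: a minimal platform glue
 * driver built on the allocation/registration API would look roughly
 * like the hypothetical sketch below ("foo" names, including
 * foo_sdhci_ops, are invented, and error unwinding is abbreviated):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct sdhci_host *host;
 *		struct resource *res;
 *		int ret;
 *
 *		host = sdhci_alloc_host(&pdev->dev, 0);
 *		if (IS_ERR(host))
 *			return PTR_ERR(host);
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
 *		host->irq = platform_get_irq(pdev, 0);
 *		host->hw_name = "foo";
 *		host->ops = &foo_sdhci_ops;
 *
 *		ret = sdhci_add_host(host);
 *		if (ret)
 *			sdhci_free_host(host);
 *		else
 *			platform_set_drvdata(pdev, host);
 *		return ret;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct sdhci_host *host = platform_get_drvdata(pdev);
 *
 *		sdhci_remove_host(host, 0);
 *		sdhci_free_host(host);
 *		return 0;
 *	}
 */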
int sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 caps[2] = {0, 0};
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	if (debug_quirks)
		host->quirks = debug_quirks;
	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	override_timeout_clk = host->timeout_clk;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (host->version & SDHCI_SPEC_VER_MASK)
				>> SDHCI_SPEC_VER_SHIFT;
	if (host->version > SDHCI_SPEC_300) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
		sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->version >= SDHCI_SPEC_300)
		caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
			host->caps1 :
			sdhci_readl(host, SDHCI_CAPABILITIES_1);

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
	    (host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
	    (caps[0] & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
	    (host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA. A driver has the opportunity to change
	 * that during the first call to ->enable_dma(). Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT)
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma) {
			if (host->ops->enable_dma(host)) {
				pr_warn("%s: No suitable DMA available - falling back to PIO\n",
					mmc_hostname(mmc));
				host->flags &=
					~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
			}
		}
	}

	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;
	if (host->flags & SDHCI_USE_ADMA) {
		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end
		 * descriptor, all multiplied by the descriptor size.
		 */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->align_buffer_sz = SDHCI_MAX_SEGS *
						SDHCI_ADMA2_64_ALIGN;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
			host->align_sz = SDHCI_ADMA2_64_ALIGN;
			host->align_mask = SDHCI_ADMA2_64_ALIGN - 1;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->align_buffer_sz = SDHCI_MAX_SEGS *
						SDHCI_ADMA2_32_ALIGN;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
			host->align_sz = SDHCI_ADMA2_32_ALIGN;
			host->align_mask = SDHCI_ADMA2_32_ALIGN - 1;
		}
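		/*
		 * Worked example of the sizing above, using the values these
		 * constants take in sdhci.h (adjust if they differ): with
		 * SDHCI_MAX_SEGS = 128 and the 12-byte 64-bit ADMA2
		 * descriptor, adma_table_sz is (128 * 2 + 1) * 12 = 3084
		 * bytes; the 8-byte 32-bit descriptor gives 2056 bytes.
		 */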
		host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
						      host->adma_table_sz,
						      &host->adma_addr,
						      GFP_KERNEL);
		host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
		if (!host->adma_table || !host->align_buffer) {
			dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
					  host->adma_table, host->adma_addr);
			kfree(host->align_buffer);
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			host->adma_table = NULL;
			host->align_buffer = NULL;
		} else if (host->adma_addr & host->align_mask) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
					  host->adma_table, host->adma_addr);
			kfree(host->align_buffer);
			host->adma_table = NULL;
			host->align_buffer = NULL;
		}
	}
	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}
	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			return -ENODEV;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}
	host->next_data.cookie = 1;

	/*
	 * In case of Host Controller v3.00, find out whether the clock
	 * multiplier is supported.
	 */
	host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;
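	/*
	 * For example, a Clock Multiplier field of 0x7 means the controller
	 * can multiply the base clock by 8 in programmable clock mode, while
	 * a field of 0 means the mode is absent entirely.
	 */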
	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	mmc->f_max = host->max_clk;
	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			mmc->f_max = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
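	/*
	 * Worked example, using the divisor limits from sdhci.h: with a
	 * 200 MHz base clock and the v3.00 maximum divisor of 2046
	 * (SDHCI_MAX_DIV_SPEC_300), f_min comes out at roughly 97.75 kHz;
	 * a v2.00 controller capped at divisor 256 (SDHCI_MAX_DIV_SPEC_200)
	 * would instead bottom out at 781.25 kHz.
	 */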
	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;
		if (host->timeout_clk == 0) {
			if (host->ops->get_timeout_clock) {
				host->timeout_clk =
					host->ops->get_timeout_clock(host);
			} else {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
				       mmc_hostname(mmc));
				return -ENODEV;
			}
		}

		if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}
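	/*
	 * Worked example of the division above: timeout_clk is kept in kHz,
	 * so a 1 MHz timeout clock (1000 kHz) with the default maximum count
	 * of 2^27 cycles gives max_busy_timeout = 134217728 / 1000, i.e.
	 * roughly 134 seconds expressed in milliseconds.
	 */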
	if (override_timeout_clk)
		host->timeout_clk = override_timeout_clk;
	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA)) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
	} else {
		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
	}
	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out. Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (caps[0] & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    !(mmc->caps & MMC_CAP_NONREMOVABLE))
		mmc->caps |= MMC_CAP_NEEDS_POLL;
	/* If there are external regulators, get them */
	if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
				     SDHCI_SUPPORT_SDR50 |
				     SDHCI_SUPPORT_DDR50);
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}
	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
		caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			     SDHCI_SUPPORT_DDR50);

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		       SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (caps[1] & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (caps[1] & SDHCI_SUPPORT_SDR50)
		mmc->caps |= MMC_CAP_UHS_SDR50;

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (caps[1] & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;
	/* Does the host need tuning for SDR50? */
	if (caps[1] & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Does the host need tuning for SDR104 / HS200? */
	if (mmc->caps2 & MMC_CAP2_HS200)
		host->flags |= SDHCI_SDR104_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (caps[1] & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (caps[1] & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (caps[1] & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
	/* Initial value for re-tuning timer count */
	host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * If the re-tuning timer is not disabled, the actual re-tuning
	 * interval is 2 ^ (n - 1) seconds, where n is the register field
	 * decoded above.
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);
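	/*
	 * For example, a register field of 4 yields a re-tuning interval of
	 * 2 ^ 3 = 8 seconds, while a field of 0 disables the timer.
	 */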
	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
			    SDHCI_RETUNING_MODE_SHIFT;
	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert uA to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}
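	/*
	 * Worked example of the conversion above: a regulator limit of
	 * 800000 uA is 800 mA, which divided by the 4 mA register step
	 * (SDHCI_MAX_CURRENT_MULTIPLIER, as defined in sdhci.h) gives a
	 * register value of 200, replicated into the 3.3V, 3.0V and 1.8V
	 * fields.
	 */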
	if (caps[0] & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it the highest priority. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		return -ENODEV;
	}
	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = SDHCI_MAX_SEGS;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less restrictive than the 512KiB boundary anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}
	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
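	/*
	 * The register field is an exponent on top of 512 bytes: a field of
	 * 0 gives 512-byte blocks, 1 gives 1024 and 2 gives 2048, which is
	 * why the SDHCI_QUIRK_FORCE_BLK_SZ_2048 path above simply sets the
	 * field to 2 before the shift.
	 */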
	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		     sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif
#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto reset;
	}
#endif
	mmiowb();

	mmc_add_host(mmc);

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_add_host);
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->adma_table)
		dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
				  host->adma_table, host->adma_addr);
	kfree(host->align_buffer);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);
/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);
module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
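/*
 * Usage note (illustrative): since debug_quirks and debug_quirks2 are
 * read-only module parameters, quirks can be forced at load time, e.g.
 * "modprobe sdhci debug_quirks=0x1", where the bit values correspond to
 * the SDHCI_QUIRK_* definitions in sdhci.h.
 */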