/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/wakelock.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
static struct workqueue_struct *workqueue;
static struct wake_lock mmc_delayed_work_wake_lock;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	wake_lock(&mmc_delayed_work_wake_lock);
	return queue_delayed_work(workqueue, work, delay);
}
/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries) {
		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			mmc_hostname(host), cmd->opcode, err);

		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	} else {
		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);
	}
}

EXPORT_SYMBOL(mmc_request_done);
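
/*
 * Example (illustrative sketch, not part of this driver): a host controller
 * driver typically records the command status in its interrupt handler and
 * then hands the finished request back to the core with mmc_request_done().
 * The "foo_host" structure and FOO_* registers below are hypothetical.
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_host *fh = dev_id;
 *		struct mmc_request *mrq = fh->mrq;
 *
 *		mrq->cmd->resp[0] = readl(fh->base + FOO_RESP0);
 *		if (readl(fh->base + FOO_STATUS) & FOO_STATUS_TIMEOUT)
 *			mrq->cmd->error = -ETIMEDOUT;
 *
 *		fh->mrq = NULL;
 *		mmc_request_done(fh->mmc, mrq);
 *		return IRQ_HANDLED;
 *	}
 */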
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	led_trigger_event(host->led, LED_FULL);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	host->ops->request(host, mrq);
}
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(mrq->done_data);
}
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	DECLARE_COMPLETION_ONSTACK(complete);

	mrq->done_data = &complete;
	mrq->done = mmc_wait_done;

	mmc_start_request(host, mrq);

	wait_for_completion(&complete);
}

EXPORT_SYMBOL(mmc_wait_for_req);
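
/*
 * Example (illustrative sketch): issuing a single 512-byte block read with
 * mmc_wait_for_req().  The caller must already hold the host claim; "card",
 * "blk_addr" and "buf" are assumed to exist, and a real caller would check
 * cmd.error and data.error afterwards.  mmc_start_request() links cmd and
 * data to the request, so only mrq.cmd and mrq.data need to be set here.
 *
 *	struct mmc_request mrq = {0};
 *	struct mmc_command cmd = {0};
 *	struct mmc_data data = {0};
 *	struct scatterlist sg;
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	sg_init_one(&sg, buf, 512);
 *	mmc_set_data_timeout(&data, card);
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *
 *	mmc_wait_for_req(card->host, &mrq);
 */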
/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete.  Return any error that occurred while the command
 *	was executing.  Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq;

	WARN_ON(!host->claimed);

	memset(&mrq, 0, sizeof(struct mmc_request));

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
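
/*
 * Example (illustrative sketch): polling a card's R1 status with CMD13 via
 * mmc_wait_for_cmd(), much as the erase path below does.  "card", "err" and
 * "status" are assumed to exist, and the retry count here is arbitrary.
 *
 *	struct mmc_command cmd;
 *
 *	memset(&cmd, 0, sizeof(struct mmc_command));
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 *
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *	if (!err)
 *		status = cmd.resp[0];
 */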
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		timeout_us += data->timeout_clks * 1000 /
			(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The limit is really 250 ms, but that is
			 * insufficient for some crappy cards.
			 */
			limit_us = 300000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
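
/*
 * Example (illustrative sketch): a driver preparing a single-segment bounce
 * buffer can pad the length up front, e.g.
 *
 *	len = mmc_align_data_size(card, len);
 *
 * so that 13 becomes 16 while 512 is returned unchanged, and then size the
 * buffer to the padded value.
 */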
/**
 *	mmc_host_enable - enable a host.
 *	@host: mmc host to enable
 *
 *	Hosts that support power saving can use the 'enable' and 'disable'
 *	methods to exit and enter power saving states. For more information
 *	see comments for struct mmc_host_ops.
 */
int mmc_host_enable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (host->nesting_cnt++)
		return 0;

	cancel_delayed_work_sync(&host->disable);

	if (host->enabled)
		return 0;

	if (host->ops->enable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->enable(host);
		host->en_dis_recurs = 0;

		if (err) {
			pr_debug("%s: enable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
	}
	host->enabled = 1;
	return 0;
}
EXPORT_SYMBOL(mmc_host_enable);
static int mmc_host_do_disable(struct mmc_host *host, int lazy)
{
	if (host->ops->disable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->disable(host, lazy);
		host->en_dis_recurs = 0;

		if (err < 0) {
			pr_debug("%s: disable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
		if (err > 0) {
			unsigned long delay = msecs_to_jiffies(err);

			mmc_schedule_delayed_work(&host->disable, delay);
		}
	}
	host->enabled = 0;
	return 0;
}
/**
 *	mmc_host_disable - disable a host.
 *	@host: mmc host to disable
 *
 *	Hosts that support power saving can use the 'enable' and 'disable'
 *	methods to exit and enter power saving states. For more information
 *	see comments for struct mmc_host_ops.
 */
int mmc_host_disable(struct mmc_host *host)
{
	int err;

	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	err = mmc_host_do_disable(host, 0);
	return err;
}
EXPORT_SYMBOL(mmc_host_disable);
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non null and
 *	dereferences to a non-zero value then this will return prematurely with
 *	that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (!stop)
		mmc_host_enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);
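
/*
 * Example (illustrative sketch): most callers use the mmc_claim_host()
 * wrapper (which passes a NULL @abort) together with mmc_release_host() to
 * bracket a command sequence that must not be interleaved with other users
 * of the host:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	mmc_release_host(card->host);
 */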
/**
 *	mmc_try_claim_host - try exclusively to claim a host
 *	@host: mmc host to claim
 *
 *	Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);
static void mmc_do_release_host(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}
void mmc_host_deeper_disable(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, disable.work);

	/* If the host is claimed then we do not want to disable it anymore */
	if (!mmc_try_claim_host(host))
		goto out;
	mmc_host_do_disable(host, 1);
	mmc_do_release_host(host);

out:
	wake_unlock(&mmc_delayed_work_wake_lock);
}
/**
 *	mmc_host_lazy_disable - lazily disable a host.
 *	@host: mmc host to disable
 *
 *	Hosts that support power saving can use the 'enable' and 'disable'
 *	methods to exit and enter power saving states. For more information
 *	see comments for struct mmc_host_ops.
 */
int mmc_host_lazy_disable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	if (host->disable_delay) {
		mmc_schedule_delayed_work(&host->disable,
				msecs_to_jiffies(host->disable_delay));
		return 0;
	} else
		return mmc_host_do_disable(host, 1);
}
EXPORT_SYMBOL(mmc_host_lazy_disable);
/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release an MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	WARN_ON(!host->claimed);

	mmc_host_lazy_disable(host);

	mmc_do_release_host(host);
}

EXPORT_SYMBOL(mmc_release_host);
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}
/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}
/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}
/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}
/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}
/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}
/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
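
/*
 * Example (illustrative sketch): board code with a fixed 3.3V supply can
 * derive the host's OCR mask directly:
 *
 *	host->ocr_avail = mmc_vddrange_to_ocrmask(3300, 3300);
 *
 * Per the boundary rules above this evaluates to
 * MMC_VDD_32_33 | MMC_VDD_33_34 (OCR bits 20 and 21), which is also how
 * mmc_regulator_get_ocrmask() below folds each discrete regulator voltage
 * into the mask.
 */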
#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 * @supply: regulator to use
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;
	int enabled;

	enabled = regulator_is_enabled(supply);
	if (enabled < 0)
		return enabled;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);
		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !enabled)
			result = regulator_enable(supply);
	} else if (enabled) {
		result = regulator_disable(supply);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);
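
/*
 * Example (illustrative sketch): a host driver backed by a regulator would
 * normally call this from its set_ios() method so the supply follows
 * ios->vdd.  The "foo_host" structure and its "vcc" member are hypothetical.
 *
 *	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		struct foo_host *fh = mmc_priv(mmc);
 *
 *		if (ios->power_mode == MMC_POWER_OFF)
 *			mmc_regulator_set_ocr(fh->vcc, 0);
 *		else
 *			mmc_regulator_set_ocr(fh->vcc, ios->vdd);
 *	}
 */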
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		host->ios.vdd = bit;
		mmc_set_ios(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}
/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}
/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_min;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
}
static void mmc_power_off(struct mmc_host *host)
{
	host->ios.clock = 0;
	host->ios.vdd = 0;
	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
}
/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}
/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
int mmc_resume_bus(struct mmc_host *host)
{
	if (!mmc_bus_needs_resume(host))
		return -EINVAL;

	printk("%s: Starting deferred resume\n", mmc_hostname(host));
	host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		mmc_power_up(host);
		BUG_ON(!host->bus_ops->resume);
		host->bus_ops->resume(host);
	}

	if (host->bus_ops->detect && !host->bus_dead)
		host->bus_ops->detect(host);

	mmc_bus_put(host);
	printk("%s: Deferred resume completed\n", mmc_hostname(host));
	return 0;
}
EXPORT_SYMBOL(mmc_resume_bus);
/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_power_off(host);

	mmc_bus_put(host);
}
/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
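
/*
 * Example (illustrative sketch): a card-detect GPIO interrupt handler
 * usually just kicks the core with a debounce delay and lets mmc_rescan()
 * below do the real work.  The 200 ms debounce value is arbitrary.
 *
 *	static irqreturn_t foo_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */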
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}
static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
				      struct mmc_command *cmd,
				      unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (card->host->ios.clock / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	cmd->erase_timeout = erase_timeout;
}
static void mmc_set_sd_erase_timeout(struct mmc_card *card,
				     struct mmc_command *cmd, unsigned int arg,
				     unsigned int qty)
{
	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		cmd->erase_timeout = card->ssr.erase_timeout * qty +
				     card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		cmd->erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (cmd->erase_timeout < 1000)
		cmd->erase_timeout = 1000;
}
static void mmc_set_erase_timeout(struct mmc_card *card,
				  struct mmc_command *cmd, unsigned int arg,
				  unsigned int qty)
{
	if (mmc_card_sd(card))
		mmc_set_sd_erase_timeout(card, cmd, arg, qty);
	else
		mmc_set_mmc_erase_timeout(card, cmd, arg, qty);
}
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd;
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case.  SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EINVAL;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EINVAL;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	mmc_set_erase_timeout(card, &cmd, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			printk(KERN_ERR "error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == 7);
out:
	return err;
}
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);
int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);
int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);
int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
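
/*
 * Example (illustrative sketch): a block driver handling a discard request
 * would combine the helpers above roughly like this, with the host already
 * claimed and "from"/"nr" given in sectors:
 *
 *	if (mmc_can_erase(card)) {
 *		unsigned int arg = mmc_can_trim(card) ?
 *					MMC_TRIM_ARG : MMC_ERASE_ARG;
 *
 *		if (arg == MMC_TRIM_ARG ||
 *		    mmc_erase_group_aligned(card, from, nr))
 *			err = mmc_erase(card, from, nr, arg);
 *	}
 */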
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	unsigned long flags;
	int extend_wakelock = 0;

	spin_lock_irqsave(&host->lock, flags);

	if (host->rescan_disable) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
		host->bus_ops->detect(host);

	/* If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond */
	if (host->bus_dead)
		extend_wakelock = 1;

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr)) {
			mmc_claim_host(host);
			/* try SDMEM (but not MMC) even if SDIO is broken */
			if (mmc_send_app_op_cond(host, 0, &ocr))
				goto out_fail;

			if (mmc_attach_sd(host, ocr))
				mmc_power_off(host);
			extend_wakelock = 1;
		}
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

out_fail:
	mmc_release_host(host);
	mmc_power_off(host);

out:
	if (extend_wakelock)
		wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
	else
		wake_unlock(&mmc_delayed_work_wake_lock);

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}
void mmc_power_save_host(struct mmc_host *host)
{
	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return;
	}

	if (host->bus_ops->power_save)
		host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);
}
EXPORT_SYMBOL(mmc_power_save_host);
void mmc_power_restore_host(struct mmc_host *host)
{
	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return;
	}

	mmc_power_up(host);
	host->bus_ops->power_restore(host);

	mmc_bus_put(host);
}
EXPORT_SYMBOL(mmc_power_restore_host);
int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);
int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);
int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);
#ifdef CONFIG_PM

/**
 *	mmc_suspend_host - suspend a host
 *	@host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	if (mmc_bus_needs_resume(host))
		return 0;

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend)
			err = host->bus_ops->suspend(host);
	}
	mmc_bus_put(host);

	if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER))
		mmc_power_off(host);

	return err;
}

EXPORT_SYMBOL(mmc_suspend_host);
/**
 *	mmc_resume_host - resume a previously suspended host
 *	@host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_resume_flags & MMC_BUSRESUME_MANUAL_RESUME) {
		host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
		mmc_bus_put(host);
		return 0;
	}

	if (host->bus_ops && !host->bus_dead) {
		if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			printk(KERN_WARNING "%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			err = 0;
		}
	}
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);
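
/*
 * Example (illustrative sketch): a platform host driver typically wires
 * these into its suspend/resume callbacks, e.g.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		return mmc ? mmc_suspend_host(mmc) : 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		return mmc ? mmc_resume_host(mmc) : 0;
 *	}
 *
 * The "foo_*" names and drvdata layout are hypothetical.
 */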
/*
 * Do the card removal on suspend if the card is assumed removable. Do that
 * in a pm notifier while userspace isn't yet frozen, so we will be able to
 * sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		mmc_claim_host(host);

		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_detach_bus(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);

	}

	return 0;
}
#endif
#ifdef CONFIG_MMC_EMBEDDED_SDIO
void mmc_set_embedded_sdio_data(struct mmc_host *host,
				struct sdio_cis *cis,
				struct sdio_cccr *cccr,
				struct sdio_embedded_func *funcs,
				int num_funcs)
{
	host->embedded_sdio_data.cis = cis;
	host->embedded_sdio_data.cccr = cccr;
	host->embedded_sdio_data.funcs = funcs;
	host->embedded_sdio_data.num_funcs = num_funcs;
}

EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
#endif
static int __init mmc_init(void)
{
	int ret;

	wake_lock_init(&mmc_delayed_work_wake_lock, WAKE_LOCK_SUSPEND, "mmc_delayed_work");

	workqueue = create_singlethread_workqueue("kmmcd");
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}
static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
	wake_lock_destroy(&mmc_delayed_work_wake_lock);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");