/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author: Andrew Christian
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>

#include "queue.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
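/*
 * The module itself is built as "mmc_block", but its parameters live under
 * the "mmcblk." prefix defined above so they match the device naming, e.g.
 * the (hypothetical) boot argument mmcblk.perdev_minors=16.
 */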
#define INAND_CMD38_ARG_EXT_CSD	 113
#define INAND_CMD38_ARG_ERASE	 0x00
#define INAND_CMD38_ARG_TRIM	 0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS	(10 * 60 * 1000)	/* 10 minute timeout */

#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
				  (req->cmd_flags & REQ_META)) && \
				  (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02
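/*
 * PACKED_CMD_VER and PACKED_CMD_WR are the version and R/W fields of the
 * packed command header defined by the eMMC 4.5 specification; they are
 * written into word 0 of the header in mmc_blk_packed_hdr_wrq_prep() below.
 */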
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	name_idx;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with mmc_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};
static DEFINE_MUTEX(open_lock);

enum {
	MMC_PACKED_NR_IDX = -1,
	MMC_PACKED_NR_ZERO,
	MMC_PACKED_NR_SINGLE,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);

static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
	struct mmc_packed *packed = mqrq->packed;

	mqrq->cmd_type = MMC_PACKED_NONE;
	packed->nr_entries = MMC_PACKED_NR_ZERO;
	packed->idx_failure = MMC_PACKED_NR_IDX;
	packed->retries = 0;
	packed->blocks = 0;
}
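/*
 * mmc_blk_get()/mmc_blk_put() implement the usage refcounting for a slot's
 * mmc_blk_data under open_lock; the last put frees the queue, the gendisk
 * and the mmc_blk_data itself.
 */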
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}
static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	return ret;
}
static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;

	mmc_claim_host(card->host);

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				card->ext_csd.boot_ro_lock |
				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

	mmc_release_host(card->host);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}
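/*
 * The attribute above is exposed as "ro_lock_until_next_power_on" in sysfs.
 * A sketch of its use from userspace, assuming a hypothetical boot area
 * device named mmcblk0boot0:
 *
 *	echo 1 > /sys/block/mmcblk0boot0/ro_lock_until_next_power_on
 */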
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}
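/*
 * "force_ro" lets userspace toggle the read-only flag of a disk that is not
 * write protected in hardware, e.g. (hypothetical device name):
 *
 *	echo 1 > /sys/block/mmcblk0/force_ro	# force read-only
 *	cat /sys/block/mmcblk0/force_ro		# 1 if forced, 0 otherwise
 */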
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes)
		return idata;

	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}
static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = get_card_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
	struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;
	int is_rpmb = false;
	u32 status = 0;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *	cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	mmc_claim_host(card->host);

	err = mmc_blk_part_switch(card, md);
	if (err)
		goto cmd_rel_host;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			goto cmd_rel_host;
	}

	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
			idata->ic.write_flag & (1 << 31));
		if (err)
			goto cmd_rel_host;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
						__func__, cmd.error);
		err = cmd.error;
		goto cmd_rel_host;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
						__func__, data.error);
		err = data.error;
		goto cmd_rel_host;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
		err = -EFAULT;
		goto cmd_rel_host;
	}

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
				 idata->buf, idata->buf_bytes)) {
			err = -EFAULT;
			goto cmd_rel_host;
		}
	}

	if (is_rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
					"%s: Card Status=0x%08X, error %d\n",
					__func__, status, err);
	}

cmd_rel_host:
	mmc_release_host(card->host);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return err;
}
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	int ret = -EINVAL;
	if (cmd == MMC_IOC_CMD)
		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
	return ret;
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};
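/*
 * Userspace reaches mmc_blk_ioctl_cmd() through the MMC_IOC_CMD ioctl on
 * the whole block device.  A minimal sketch (hypothetical device path and
 * RCA value, error handling omitted; the flag macros come from the kernel's
 * mmc headers):
 *
 *	struct mmc_ioc_cmd ic = { 0 };
 *	int fd = open("/dev/mmcblk0", O_RDWR);
 *
 *	ic.opcode = MMC_SEND_STATUS;		// CMD13
 *	ic.arg = rca << 16;
 *	ic.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	ioctl(fd, MMC_IOC_CMD, &ic);		// status lands in ic.response[0]
 *
 * The caller needs CAP_SYS_RAWIO, as enforced above.
 */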
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = mmc_get_drvdata(card);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret)
			return ret;

		card->ext_csd.part_config = part_config;
	}

	main_md->part_curr = md->part_type;
	return 0;
}
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}
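/*
 * Helpers for error recovery: send_stop() issues CMD12 (STOP_TRANSMISSION)
 * and get_card_status() issues CMD13 (SEND_STATUS), each reporting the R1
 * card status back through *status on success.
 */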
static int send_stop(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}
#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid) {
			pr_err("%s: status not valid, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
			pr_err("%s: command error, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/* Otherwise abort the command */
		pr_err("%s: not retrying timeout\n",
			req->rq_disk->disk_name);
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}
/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;

	/* Flag General errors */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
		if ((status & R1_ERROR) ||
		    (brq->stop.resp[0] & R1_ERROR)) {
			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0], status);
			*gen_err = 1;
		}

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card, &stop_status);
		if (err)
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);

		/*
		 * If the stop cmd also timed out, the card is probably
		 * not present, so abort.  Other errors are bad news too.
		 */
		if (err)
			return ERR_ABORT;
		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
			if (stop_status & R1_ERROR) {
				pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
				       req->rq_disk->disk_name, __func__,
				       stop_status);
				*gen_err = 1;
			}
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}
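/*
 * mmc_blk_reset() performs at most one HW reset per I/O type (read, write,
 * discard, secdiscard): the type bit in md->reset_done blocks repeated
 * resets until mmc_blk_reset_success() clears it after a later success.
 */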
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}
int mmc_access_rpmb(struct mmc_queue *mq)
{
	struct mmc_blk_data *md = mq->data;
	/*
	 * If this is an RPMB partition access, return true
	 */
	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
		return true;

	return false;
}
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	blk_end_request(req, err, blk_rq_bytes(req));

	return err;
}
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg, trim_arg, erase_arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	/* The sanitize operation is supported at v4.5 only */
	if (mmc_can_sanitize(card)) {
		erase_arg = MMC_ERASE_ARG;
		trim_arg = MMC_TRIM_ARG;
	} else {
		erase_arg = MMC_SECURE_ERASE_ARG;
		trim_arg = MMC_SECURE_TRIM1_ARG;
	}

	if (mmc_erase_group_aligned(card, from, nr))
		arg = erase_arg;
	else if (mmc_can_trim(card))
		arg = trim_arg;
	else {
		err = -EINVAL;
		goto out;
	}
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err)
		goto out;

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err)
			goto out;
	}

	if (mmc_can_sanitize(card)) {
		trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_SANITIZE_START, 1, 0);
		trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
	}
out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, err, blk_rq_bytes(req));

	return err;
}
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	blk_end_request_all(req, ret);

	return ret;
}
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}
#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */
static int mmc_blk_err_check(struct mmc_card *card,
			     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int ecc_err = 0, gen_err = 0;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, which we have to wait for to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		u32 status;
		unsigned long timeout;

		/* Check stop command response */
		if (brq->stop.resp[0] & R1_ERROR) {
			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0]);
			gen_err = 1;
		}

		timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
		do {
			int err = get_card_status(card, &status, 5);
			if (err) {
				pr_err("%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
				return MMC_BLK_CMD_ERR;
			}

			if (status & R1_ERROR) {
				pr_err("%s: %s: general error sending status command, card status %#x\n",
				       req->rq_disk->disk_name, __func__,
				       status);
				gen_err = 1;
			}

			/* Timeout if the device never becomes ready for data
			 * and never leaves the program state.
			 */
			if (time_after(jiffies, timeout)) {
				pr_err("%s: Card stuck in programming state! %s %s\n",
				       mmc_hostname(card->host),
				       req->rq_disk->disk_name, __func__);

				return MMC_BLK_CMD_ERR;
			}
			/*
			 * Some cards mishandle the status bits,
			 * so make sure to check both the busy
			 * indication and the card state.
			 */
		} while (!(status & R1_READY_FOR_DATA) ||
			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
	}

	/* if general error occurs, retry the write operation. */
	if (gen_err) {
		pr_warn("%s: retrying write for general error\n",
			req->rq_disk->disk_name);
		return MMC_BLK_RETRY;
	}

	if (brq->data.error) {
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
			return MMC_BLK_PARTIAL;
		else
			return MMC_BLK_SUCCESS;
	}

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}
static int mmc_blk_packed_err_check(struct mmc_card *card,
				    struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
			mmc_active);
	struct request *req = mq_rq->req;
	struct mmc_packed *packed = mq_rq->packed;
	int err, check, status;
	u8 *ext_csd;

	packed->retries--;
	check = mmc_blk_err_check(card, areq);
	err = get_card_status(card, &status, 0);
	if (err) {
		pr_err("%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
		return MMC_BLK_ABORT;
	}

	if (status & R1_EXCEPTION_EVENT) {
		ext_csd = kzalloc(512, GFP_KERNEL);
		if (!ext_csd) {
			pr_err("%s: unable to allocate buffer for ext_csd\n",
			       req->rq_disk->disk_name);
			return -ENOMEM;
		}

		err = mmc_send_ext_csd(card, ext_csd);
		if (err) {
			pr_err("%s: error %d sending ext_csd\n",
			       req->rq_disk->disk_name, err);
			check = MMC_BLK_ABORT;
			goto free;
		}

		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
		     EXT_CSD_PACKED_FAILURE) &&
		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
		     EXT_CSD_PACKED_GENERIC_ERROR)) {
			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
			    EXT_CSD_PACKED_INDEXED_ERROR) {
				packed->idx_failure =
				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
				check = MMC_BLK_PARTIAL;
			}
			pr_err("%s: packed cmd failed, nr %u, sectors %u, failure index: %d\n",
			       req->rq_disk->disk_name, packed->nr_entries,
			       packed->blocks, packed->idx_failure);
		}
free:
		kfree(ext_csd);
	}

	return check;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->data;
	bool do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * REQ_META accesses, and are supported only on MMCs.
	 *
	 * XXX: this really needs a good explanation of why REQ_META
	 * is treated special.
	 */
	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/* Some controllers can't do multiblock reads due to hw bugs */
		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
		    rq_data_dir(req) == READ)
			brq->data.blocks = 1;
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags |= MMC_DATA_READ;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags |= MMC_DATA_WRITE;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		(req->cmd_flags & REQ_META) &&
		(rq_data_dir(req) == WRITE) &&
		((brq->data.blocks * brq->data.blksz) >=
		 card->ext_csd.data_tag_unit_size);

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open-ended ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}
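/*
 * The packed command header occupies one native sector (4KB on large-sector
 * cards, 512B otherwise); work out how many scatterlist segments it needs
 * given the queue's maximum segment size.
 */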
static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
					  struct mmc_card *card)
{
	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
	unsigned int max_seg_sz = queue_max_segment_size(q);
	unsigned int len, nr_segs = 0;

	do {
		len = min(hdr_sz, max_seg_sz);
		hdr_sz -= len;
		nr_segs++;
	} while (hdr_sz);

	return nr_segs;
}
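/*
 * Try to batch consecutive write requests from the queue into one packed
 * WRITE command.  Returns the number of packed entries; 0 means the request
 * goes out as a normal, non-packed transfer.
 */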
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;
	struct request *cur = req, *next = NULL;
	struct mmc_blk_data *md = mq->data;
	struct mmc_queue_req *mqrq = mq->mqrq_cur;
	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
	unsigned int req_sectors = 0, phys_segments = 0;
	unsigned int max_blk_count, max_phys_segs;
	bool put_back = true;
	u8 max_packed_rw = 0;
	u8 reqs = 0;

	if (!(md->flags & MMC_BLK_PACKED_CMD))
		goto no_packed;

	if ((rq_data_dir(cur) == WRITE) &&
	    mmc_host_packed_wr(card->host))
		max_packed_rw = card->ext_csd.max_packed_writes;

	if (max_packed_rw == 0)
		goto no_packed;

	if (mmc_req_rel_wr(cur) &&
	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
		goto no_packed;

	if (mmc_large_sector(card) &&
	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
		goto no_packed;

	mmc_blk_clear_packed(mqrq);

	max_blk_count = min(card->host->max_blk_count,
			    card->host->max_req_size >> 9);
	if (unlikely(max_blk_count > 0xffff))
		max_blk_count = 0xffff;

	max_phys_segs = queue_max_segments(q);
	req_sectors += blk_rq_sectors(cur);
	phys_segments += cur->nr_phys_segments;

	if (rq_data_dir(cur) == WRITE) {
		req_sectors += mmc_large_sector(card) ? 8 : 1;
		phys_segments += mmc_calc_packed_hdr_segs(q, card);
	}

	do {
		if (reqs >= max_packed_rw - 1) {
			put_back = false;
			break;
		}

		spin_lock_irq(q->queue_lock);
		next = blk_fetch_request(q);
		spin_unlock_irq(q->queue_lock);
		if (!next) {
			put_back = false;
			break;
		}

		if (mmc_large_sector(card) &&
		    !IS_ALIGNED(blk_rq_sectors(next), 8))
			break;

		if (next->cmd_flags & REQ_DISCARD ||
		    next->cmd_flags & REQ_FLUSH)
			break;

		if (rq_data_dir(cur) != rq_data_dir(next))
			break;

		if (mmc_req_rel_wr(next) &&
		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
			break;

		req_sectors += blk_rq_sectors(next);
		if (req_sectors > max_blk_count)
			break;

		phys_segments += next->nr_phys_segments;
		if (phys_segments > max_phys_segs)
			break;

		list_add_tail(&next->queuelist, &mqrq->packed->list);
		cur = next;
		reqs++;
	} while (1);

	if (put_back) {
		spin_lock_irq(q->queue_lock);
		blk_requeue_request(q, next);
		spin_unlock_irq(q->queue_lock);
	}

	if (reqs > 0) {
		list_add(&req->queuelist, &mqrq->packed->list);
		mqrq->packed->nr_entries = ++reqs;
		mqrq->packed->retries = reqs;
		return reqs;
	}

no_packed:
	mqrq->cmd_type = MMC_PACKED_NONE;
	return 0;
}
static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
					struct mmc_card *card,
					struct mmc_queue *mq)
{
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct request *prq;
	struct mmc_blk_data *md = mq->data;
	struct mmc_packed *packed = mqrq->packed;
	bool do_rel_wr, do_data_tag;
	u32 *packed_cmd_hdr;
	u8 hdr_blocks;
	u8 i = 1;

	mqrq->cmd_type = MMC_PACKED_WRITE;
	packed->blocks = 0;
	packed->idx_failure = MMC_PACKED_NR_IDX;

	packed_cmd_hdr = packed->cmd_hdr;
	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
	hdr_blocks = mmc_large_sector(card) ? 8 : 1;

	/*
	 * Argument for each entry of packed group
	 */
	list_for_each_entry(prq, &packed->list, queuelist) {
		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
			(prq->cmd_flags & REQ_META) &&
			(rq_data_dir(prq) == WRITE) &&
			((brq->data.blocks * brq->data.blksz) >=
			 card->ext_csd.data_tag_unit_size);
		/* Argument of CMD23 */
		packed_cmd_hdr[(i * 2)] =
			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
			blk_rq_sectors(prq);
		/* Argument of CMD18 or CMD25 */
		packed_cmd_hdr[((i * 2)) + 1] =
			mmc_card_blockaddr(card) ?
			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
		packed->blocks += blk_rq_sectors(prq);
		i++;
	}

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;
	brq->mrq.sbc = &brq->sbc;
	brq->mrq.stop = &brq->stop;

	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	brq->data.blksz = 512;
	brq->data.blocks = packed->blocks + hdr_blocks;
	brq->data.flags |= MMC_DATA_WRITE;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;

	mmc_queue_bounce_pre(mqrq);
}
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			   struct mmc_blk_request *brq, struct request *req,
			   int ret)
{
	struct mmc_queue_req *mq_rq;
	mq_rq = container_of(brq, struct mmc_queue_req, brq);

	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			ret = blk_end_request(req, 0, blocks << 9);
		}
	} else {
		if (!mmc_packed_cmd(mq_rq->cmd_type))
			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
	}
	return ret;
}
static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;
	int idx = packed->idx_failure, i = 0;
	int ret = 0;

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		if (idx == i) {
			/* retry from error index */
			packed->nr_entries -= idx;
			mq_rq->req = prq;
			ret = 1;

			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
				list_del_init(&prq->queuelist);
				mmc_blk_clear_packed(mq_rq);
			}
			return ret;
		}
		list_del_init(&prq->queuelist);
		blk_end_request(prq, 0, blk_rq_bytes(prq));
		i++;
	}

	mmc_blk_clear_packed(mq_rq);
	return ret;
}

static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		list_del_init(&prq->queuelist);
		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
	}

	mmc_blk_clear_packed(mq_rq);
}

static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
				      struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct request_queue *q = mq->queue;
	struct mmc_packed *packed = mq_rq->packed;

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.prev);
		if (prq->queuelist.prev != &packed->list) {
			list_del_init(&prq->queuelist);
			spin_lock_irq(q->queue_lock);
			blk_requeue_request(mq->queue, prq);
			spin_unlock_irq(q->queue_lock);
		} else {
			list_del_init(&prq->queuelist);
		}
	}

	mmc_blk_clear_packed(mq_rq);
}
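/*
 * Main read/write issue path.  Requests are prepared and handed to the host
 * asynchronously via mmc_start_req(); the completion status of the previous
 * request is examined here and drives retry, single-block fallback, reset
 * or abort handling.
 */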
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
	int ret = 1, disable_multi = 0, retry = 0, type;
	enum mmc_blk_status status;
	struct mmc_queue_req *mq_rq;
	struct request *req = rqc;
	struct mmc_async_req *areq;
	const u8 packed_nr = 2;
	u8 reqs = 0;

	if (!rqc && !mq->mqrq_prev->req)
		return 0;

	if (rqc)
		reqs = mmc_blk_prep_packed_list(mq, rqc);

	do {
		if (rqc) {
			/*
			 * When 4KB native sector is enabled, only 8 blocks
			 * multiple read or write is allowed
			 */
			if ((brq->data.blocks & 0x07) &&
			    (card->ext_csd.data_sector_size == 4096)) {
				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
					req->rq_disk->disk_name);
				mq_rq = mq->mqrq_cur;
				goto cmd_abort;
			}

			if (reqs >= packed_nr)
				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
							    card, mq);
			else
				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			areq = &mq->mqrq_cur->mmc_active;
		} else
			areq = NULL;
		areq = mmc_start_req(card->host, areq, (int *) &status);
		if (!areq) {
			if (status == MMC_BLK_NEW_REQUEST)
				mq->flags |= MMC_QUEUE_NEW_REQUEST;
			return 0;
		}

		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
		brq = &mq_rq->brq;
		req = mq_rq->req;
		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);

			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				ret = mmc_blk_end_packed_req(mq_rq);
				break;
			} else {
				ret = blk_end_request(req, 0,
						brq->data.bytes_xfered);
			}

			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && ret) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(req),
				       brq->data.bytes_xfered);
				rqc = NULL;
				goto cmd_abort;
			}
			break;
		case MMC_BLK_CMD_ERR:
			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_RETRY:
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV ||
			    mmc_packed_cmd(mq_rq->cmd_type))
				goto cmd_abort;
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warning("%s: retrying using single block read\n",
					   req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			ret = blk_end_request(req, -EIO,
					      brq->data.blksz);
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_NOMEDIUM:
			goto cmd_abort;
		default:
			pr_err("%s: Unhandled return value (%d)",
					req->rq_disk->disk_name, status);
			goto cmd_abort;
		}

		if (ret) {
			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				if (!mq_rq->packed->retries)
					goto cmd_abort;
				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
				mmc_start_req(card->host,
					      &mq_rq->mmc_active, NULL);
			} else {
				/*
				 * In case of an incomplete request
				 * prepare it again and resend.
				 */
				mmc_blk_rw_rq_prep(mq_rq, card,
						disable_multi, mq);
				mmc_start_req(card->host,
						&mq_rq->mmc_active, NULL);
			}
		}
	} while (ret);

	return 1;

cmd_abort:
	if (mmc_packed_cmd(mq_rq->cmd_type)) {
		mmc_blk_abort_packed_req(mq_rq);
	} else {
		if (mmc_card_removed(card))
			req->cmd_flags |= REQ_QUIET;
		while (ret)
			ret = blk_end_request(req, -EIO,
					blk_rq_cur_bytes(req));
	}

start_new_req:
	if (rqc) {
		if (mmc_card_removed(card)) {
			rqc->cmd_flags |= REQ_QUIET;
			blk_end_request_all(rqc, -EIO);
		} else {
			/*
			 * If the current request is packed, it needs to be
			 * put back.
			 */
			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
				mmc_blk_revert_packed_req(mq, mq->mqrq_cur);

			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			mmc_start_req(card->host,
				      &mq->mqrq_cur->mmc_active, NULL);
		}
	}

	return 0;
}
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	unsigned int cmd_flags = req ? req->cmd_flags : 0;

#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
	if (mmc_bus_needs_resume(card->host))
		mmc_resume_bus(card->host);
#endif

	if (req && !mq->mqrq_prev->req)
		/* claim host only for the first request */
		mmc_claim_host(card->host);

	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		if (req) {
			blk_end_request_all(req, -EIO);
		}
		ret = 0;
		goto out;
	}

	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
	if (cmd_flags & REQ_DISCARD) {
		/* complete ongoing async transfer before issuing discard */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		if (req->cmd_flags & REQ_SECURE &&
		    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (cmd_flags & REQ_FLUSH) {
		/* complete ongoing async transfer before issuing flush */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		if (!req && host->areq) {
			spin_lock_irqsave(&host->context_info.lock, flags);
			host->context_info.is_waiting_last_req = true;
			spin_unlock_irqrestore(&host->context_info.lock, flags);
		}
		ret = mmc_blk_issue_rw_rq(mq, req);
	}

out:
	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
	    (cmd_flags & MMC_REQ_SPECIAL_MASK))
		/*
		 * Release the host when there are no more requests
		 * and after a special request (discard, flush) is done.
		 * In the case of a special request there is no reentry
		 * to 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
		 */
		mmc_release_host(card->host);
	return ret;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, max_devices);
	if (devidx >= max_devices)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * !subname implies we are creating main mmc_blk_data that will be
	 * associated with mmc_card with mmc_set_drvdata. Due to device
	 * partitions, devidx will not coincide with a per-physical card
	 * index anymore so we keep track of a name index.
	 */
	if (!subname) {
		md->name_idx = find_first_zero_bit(name_use, max_devices);
		__set_bit(md->name_idx, name_use);
	} else
		md->name_idx = ((struct mmc_blk_data *)
				dev_to_disk(parent)->private_data)->name_idx;

	md->area_type = area_type;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	md->disk->flags = GENHD_FL_EXT_DEVT;
	if (area_type & MMC_BLK_DATA_AREA_RPMB)
		md->disk->flags |= GENHD_FL_NO_PART_SCAN;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%d%s", md->name_idx, subname ? subname : "");

	if (mmc_card_mmc(card))
		blk_queue_logical_block_size(md->queue.queue,
					     card->ext_csd.data_sector_size);
	else
		blk_queue_logical_block_size(md->queue.queue, 512);

	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if (mmc_card_mmc(card) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
	}

	if (mmc_card_mmc(card) &&
	    (area_type == MMC_BLK_DATA_AREA_MAIN) &&
	    (md->flags & MMC_BLK_CMD23) &&
	    card->ext_csd.packed_event_en) {
		if (!mmc_packed_init(&md->queue, card))
			md->flags |= MMC_BLK_PACKED_CMD;
	}

	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;
	struct mmc_blk_data *md;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = card->csd.capacity << (card->csd.read_blkbits - 9);
	}

	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
					MMC_BLK_DATA_AREA_MAIN);
	return md;
}
static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname,
			      int area_type)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname, area_type);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);

	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s partition %u %s\n",
		part_md->disk->disk_name, mmc_card_id(card),
		mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}
/* MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
			if (ret)
				return ret;
		}
	}

	return ret;
}
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	struct mmc_card *card;

	if (md) {
		card = md->queue.card;
		if (md->disk->flags & GENHD_FL_UP) {
			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
					card->ext_csd.boot_ro_lockable)
				device_remove_file(disk_to_dev(md->disk),
					&md->power_ro_lock);

			/* Stop new requests from getting into the queue */
			del_gendisk(md->disk);
		}

		/* Then flush out any already in there */
		mmc_cleanup_queue(&md->queue);
		if (md->flags & MMC_BLK_PACKED_CMD)
			mmc_packed_clean(&md->queue);
		mmc_blk_put(md);
	}
}

static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;

	__clear_bit(md->name_idx, name_use);
	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}
static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;
	struct mmc_card *card = md->queue.card;

	add_disk(md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		goto force_ro_fail;

	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	    card->ext_csd.boot_ro_lockable) {
		umode_t mode;

		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
			mode = S_IRUGO;
		else
			mode = S_IRUGO | S_IWUSR;

		md->power_ro_lock.show = power_ro_lock_show;
		md->power_ro_lock.store = power_ro_lock_store;
		sysfs_attr_init(&md->power_ro_lock.attr);
		md->power_ro_lock.attr.mode = mode;
		md->power_ro_lock.attr.name =
					"ro_lock_until_next_power_on";
		ret = device_create_file(disk_to_dev(md->disk),
				&md->power_ro_lock);
		if (ret)
			goto power_ro_lock_fail;
	}
	return ret;

power_ro_lock_fail:
	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
	del_gendisk(md->disk);

	return ret;
}
#define CID_MANFID_SANDISK	0x2
#define CID_MANFID_TOSHIBA	0x11
#define CID_MANFID_MICRON	0x13
#define CID_MANFID_SAMSUNG	0x15

static const struct mmc_fixup blk_fixups[] =
{
	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),

	/*
	 * Some MMC cards experience performance degradation with CMD23
	 * instead of CMD12-bounded multiblock transfers. For now we'll
	 * blacklist what's bad...
	 * - Certain Toshiba cards.
	 *
	 * N.B. This doesn't affect SD cards.
	 */
	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),

	/*
	 * Some Micron MMC cards need a longer data read timeout than
	 * indicated in CSD.
	 */
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
		  MMC_QUIRK_LONG_READ_TIME),

	/*
	 * On these Samsung MoviNAND parts, performing secure erase or
	 * secure trim can result in unrecoverable corruption due to a
	 * firmware bug.
	 */
	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),

	END_FIXUP
};
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	mmc_set_drvdata(card, md);
	mmc_fixup_device(card, blk_fixups);

#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
	mmc_set_bus_resume_policy(card->host, 1);
#endif
	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}
	return 0;

 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return 0;
}
static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	mmc_blk_remove_parts(card, md);
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_release_host(card->host);
	mmc_blk_remove_req(md);
	mmc_set_drvdata(card, NULL);
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
	mmc_set_bus_resume_policy(card->host, 0);
#endif
}
#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#else
#define mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};

static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = 256 / perdev_minors;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}

static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");