2 * Block driver for media (i.e., flash cards)
4 * Copyright 2002 Hewlett-Packard Company
5 * Copyright 2005-2008 Pierre Ossman
7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is
9 * preserved in its entirety in all copies and derived works.
11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13 * FITNESS FOR ANY PARTICULAR PURPOSE.
15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
17 * Author: Andrew Christian
20 #include <linux/moduleparam.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
24 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include <linux/errno.h>
28 #include <linux/hdreg.h>
29 #include <linux/kdev_t.h>
30 #include <linux/blkdev.h>
31 #include <linux/mutex.h>
32 #include <linux/scatterlist.h>
33 #include <linux/string_helpers.h>
34 #include <linux/delay.h>
35 #include <linux/capability.h>
36 #include <linux/compat.h>
37 #include <linux/pm_runtime.h>
39 #include <trace/events/mmc.h>
41 #include <linux/mmc/ioctl.h>
42 #include <linux/mmc/card.h>
43 #include <linux/mmc/host.h>
44 #include <linux/mmc/mmc.h>
45 #include <linux/mmc/sd.h>
47 #include <asm/uaccess.h>
51 MODULE_ALIAS("mmc:block");
52 #ifdef MODULE_PARAM_PREFIX
53 #undef MODULE_PARAM_PREFIX
55 #define MODULE_PARAM_PREFIX "mmcblk."
57 #define INAND_CMD38_ARG_EXT_CSD 113
58 #define INAND_CMD38_ARG_ERASE 0x00
59 #define INAND_CMD38_ARG_TRIM 0x01
60 #define INAND_CMD38_ARG_SECERASE 0x80
61 #define INAND_CMD38_ARG_SECTRIM1 0x81
62 #define INAND_CMD38_ARG_SECTRIM2 0x88
63 #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
64 #define MMC_SANITIZE_REQ_TIMEOUT 240000
65 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
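/*
 * For illustration: CMD6 (MMC_SWITCH) packs the EXT_CSD byte index into
 * bits [23:16] of its argument, so e.g.
 * MMC_EXTRACT_INDEX_FROM_ARG(0x03A50101) == 0xA5 == 165, the
 * EXT_CSD_SANITIZE_START index that the ioctl path checks for below.
 */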
67 #define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
68 (rq_data_dir(req) == WRITE))
69 #define PACKED_CMD_VER 0x01
70 #define PACKED_CMD_WR 0x02
72 static DEFINE_MUTEX(block_mutex);
75 * The defaults come from config options but can be overridden by module
78 static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
81 * We've only got one major, so number of mmcblk devices is
82 * limited to (1 << 20) / number of minors per device. It is also
83 * currently limited by the size of the static bitmaps below.
85 static int max_devices;
87 #define MAX_DEVICES 256
89 /* TODO: Replace these with struct ida */
90 static DECLARE_BITMAP(dev_use, MAX_DEVICES);
91 static DECLARE_BITMAP(name_use, MAX_DEVICES);
94 * There is one mmc_blk_data per slot.
99 struct mmc_queue queue;
100 struct list_head part;
103 #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
104 #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
105 #define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
108 unsigned int read_only;
109 unsigned int part_type;
110 unsigned int name_idx;
111 unsigned int reset_done;
112 #define MMC_BLK_READ BIT(0)
113 #define MMC_BLK_WRITE BIT(1)
114 #define MMC_BLK_DISCARD BIT(2)
115 #define MMC_BLK_SECDISCARD BIT(3)
118 * Only set in the main mmc_blk_data associated
119 * with the mmc_card via dev_set_drvdata; keeps
120 * track of the currently selected device partition.
122 unsigned int part_curr;
123 struct device_attribute force_ro;
124 struct device_attribute power_ro_lock;
128 static DEFINE_MUTEX(open_lock);
131 MMC_PACKED_NR_IDX = -1,
133 MMC_PACKED_NR_SINGLE,
136 module_param(perdev_minors, int, 0444);
137 MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
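/*
 * For example, with the default CONFIG_MMC_BLOCK_MINORS of 8, mmcblk0 and
 * its partitions use minors 0-7, mmcblk1 uses 8-15, and so on; the device
 * index used below is simply first_minor / perdev_minors.
 */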
139 static inline int mmc_blk_part_switch(struct mmc_card *card,
140 struct mmc_blk_data *md);
141 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
143 static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
145 struct mmc_packed *packed = mqrq->packed;
149 mqrq->cmd_type = MMC_PACKED_NONE;
150 packed->nr_entries = MMC_PACKED_NR_ZERO;
151 packed->idx_failure = MMC_PACKED_NR_IDX;
156 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
158 struct mmc_blk_data *md;
160 mutex_lock(&open_lock);
161 md = disk->private_data;
162 if (md && md->usage == 0)
166 mutex_unlock(&open_lock);
171 static inline int mmc_get_devidx(struct gendisk *disk)
173 int devidx = disk->first_minor / perdev_minors;
177 static void mmc_blk_put(struct mmc_blk_data *md)
179 mutex_lock(&open_lock);
181 if (md->usage == 0) {
182 int devidx = mmc_get_devidx(md->disk);
183 blk_cleanup_queue(md->queue.queue);
185 __clear_bit(devidx, dev_use);
190 mutex_unlock(&open_lock);
193 static ssize_t power_ro_lock_show(struct device *dev,
194 struct device_attribute *attr, char *buf)
197 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
198 struct mmc_card *card = md->queue.card;
201 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
203 else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
206 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
213 static ssize_t power_ro_lock_store(struct device *dev,
214 struct device_attribute *attr, const char *buf, size_t count)
217 struct mmc_blk_data *md, *part_md;
218 struct mmc_card *card;
221 if (kstrtoul(buf, 0, &set))
227 md = mmc_blk_get(dev_to_disk(dev));
228 card = md->queue.card;
232 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
233 card->ext_csd.boot_ro_lock |
234 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
235 card->ext_csd.part_time);
237 pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
239 card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
244 pr_info("%s: Locking boot partition ro until next power on\n",
245 md->disk->disk_name);
246 set_disk_ro(md->disk, 1);
248 list_for_each_entry(part_md, &md->part, part)
249 if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
250 pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
251 set_disk_ro(part_md->disk, 1);
259 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
263 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
265 ret = snprintf(buf, PAGE_SIZE, "%d\n",
266 get_disk_ro(dev_to_disk(dev)) ^
272 static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
273 const char *buf, size_t count)
277 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
278 unsigned long set = simple_strtoul(buf, &end, 0);
284 set_disk_ro(dev_to_disk(dev), set || md->read_only);
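/*
 * Illustrative usage, assuming the attribute is registered as "force_ro"
 * on the disk device when it is added: "echo 1 > /sys/block/mmcblk0/force_ro"
 * marks the disk read-only in software without changing the card itself.
 */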
291 #ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
293 static int max_read_speed, max_write_speed, cache_size = 4;
295 module_param(max_read_speed, int, S_IRUSR | S_IRGRP);
296 MODULE_PARM_DESC(max_read_speed, "maximum KB/s read speed 0=off");
297 module_param(max_write_speed, int, S_IRUSR | S_IRGRP);
298 MODULE_PARM_DESC(max_write_speed, "maximum KB/s write speed 0=off");
299 module_param(cache_size, int, S_IRUSR | S_IRGRP);
300 MODULE_PARM_DESC(cache_size, "MB high speed memory or SLC cache");
303 * helper macros and expectations:
304 * size - unsigned long number of bytes
305 * jiffies - unsigned long HZ timestamp difference
306 * speed - unsigned KB/s transfer rate
308 #define size_and_speed_to_jiffies(size, speed) \
309 ((size) * HZ / (speed) / 1024UL)
310 #define jiffies_and_speed_to_size(jiffies, speed) \
311 (((speed) * (jiffies) * 1024UL) / HZ)
312 #define jiffies_and_size_to_speed(jiffies, size) \
313 ((size) * HZ / (jiffies) / 1024UL)
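/*
 * Worked example, assuming HZ == 100: throttling a 1 MiB write to
 * max_write_speed == 2048 KB/s gives
 * size_and_speed_to_jiffies(1048576, 2048) == 1048576 * 100 / 2048 / 1024
 * == 50 jiffies, i.e. the request is stretched to roughly 500 ms.
 */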
315 /* Limits at which to report a warning */
316 /* jiffies_and_size_to_speed(10*HZ, queue_max_hw_sectors(q) * 512UL) ~ 25 */
317 #define MIN_SPEED(q) 250 /* 10 times faster than a floppy disk */
318 #define MAX_SPEED(q) jiffies_and_size_to_speed(1, queue_max_sectors(q) * 512UL)
320 #define speed_valid(speed) ((speed) > 0)
322 static const char off[] = "off\n";
324 static int max_speed_show(int speed, char *buf)
327 return scnprintf(buf, PAGE_SIZE, "%uKB/s\n", speed);
329 return scnprintf(buf, PAGE_SIZE, off);
332 static int max_speed_store(const char *buf, struct request_queue *q)
334 unsigned int limit, set = 0;
336 if (!strncasecmp(off, buf, sizeof(off) - 2))
338 if (kstrtouint(buf, 0, &set) || (set > INT_MAX))
342 limit = MAX_SPEED(q);
344 pr_warn("max speed %u ineffective above %u\n", set, limit);
345 limit = MIN_SPEED(q);
347 pr_warn("max speed %u painful below %u\n", set, limit);
351 static ssize_t max_write_speed_show(struct device *dev,
352 struct device_attribute *attr, char *buf)
354 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
355 int ret = max_speed_show(atomic_read(&md->queue.max_write_speed), buf);
361 static ssize_t max_write_speed_store(struct device *dev,
362 struct device_attribute *attr,
363 const char *buf, size_t count)
365 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
366 int set = max_speed_store(buf, md->queue.queue);
373 atomic_set(&md->queue.max_write_speed, set);
378 static const DEVICE_ATTR(max_write_speed, S_IRUGO | S_IWUSR,
379 max_write_speed_show, max_write_speed_store);
381 static ssize_t max_read_speed_show(struct device *dev,
382 struct device_attribute *attr, char *buf)
384 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
385 int ret = max_speed_show(atomic_read(&md->queue.max_read_speed), buf);
391 static ssize_t max_read_speed_store(struct device *dev,
392 struct device_attribute *attr,
393 const char *buf, size_t count)
395 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
396 int set = max_speed_store(buf, md->queue.queue);
403 atomic_set(&md->queue.max_read_speed, set);
408 static const DEVICE_ATTR(max_read_speed, S_IRUGO | S_IWUSR,
409 max_read_speed_show, max_read_speed_store);
411 static ssize_t cache_size_show(struct device *dev,
412 struct device_attribute *attr, char *buf)
414 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
415 struct mmc_queue *mq = &md->queue;
416 int cache_size = atomic_read(&mq->cache_size);
420 ret = scnprintf(buf, PAGE_SIZE, off);
422 int speed = atomic_read(&mq->max_write_speed);
424 if (!speed_valid(speed))
425 ret = scnprintf(buf, PAGE_SIZE, "%uMB\n", cache_size);
426 else { /* We accept race between cache_jiffies and cache_used */
427 unsigned long size = jiffies_and_speed_to_size(
428 jiffies - mq->cache_jiffies, speed);
429 long used = atomic_long_read(&mq->cache_used);
434 size = (used - size) * 100 / cache_size
437 ret = scnprintf(buf, PAGE_SIZE, "%uMB %lu%% used\n",
446 static ssize_t cache_size_store(struct device *dev,
447 struct device_attribute *attr,
448 const char *buf, size_t count)
450 struct mmc_blk_data *md;
451 unsigned int set = 0;
453 if (strncasecmp(off, buf, sizeof(off) - 2)
454 && (kstrtouint(buf, 0, &set) || (set > INT_MAX)))
457 md = mmc_blk_get(dev_to_disk(dev));
458 atomic_set(&md->queue.cache_size, set);
463 static const DEVICE_ATTR(cache_size, S_IRUGO | S_IWUSR,
464 cache_size_show, cache_size_store);
466 /* correct for write-back */
467 static long mmc_blk_cache_used(struct mmc_queue *mq, unsigned long waitfor)
470 int speed = atomic_read(&mq->max_write_speed);
472 if (speed_valid(speed)) {
473 unsigned long size = jiffies_and_speed_to_size(
474 waitfor - mq->cache_jiffies, speed);
475 used = atomic_long_read(&mq->cache_used);
483 atomic_long_set(&mq->cache_used, used);
484 mq->cache_jiffies = waitfor;
489 static void mmc_blk_simulate_delay(
490 struct mmc_queue *mq,
492 unsigned long waitfor)
499 max_speed = (rq_data_dir(req) == READ)
500 ? atomic_read(&mq->max_read_speed)
501 : atomic_read(&mq->max_write_speed);
502 if (speed_valid(max_speed)) {
503 unsigned long bytes = blk_rq_bytes(req);
505 if (rq_data_dir(req) != READ) {
506 int cache_size = atomic_read(&mq->cache_size);
509 unsigned long size = cache_size * 1024L * 1024L;
510 long used = mmc_blk_cache_used(mq, waitfor);
513 atomic_long_set(&mq->cache_used, used);
519 waitfor += size_and_speed_to_jiffies(bytes, max_speed);
520 if (time_is_after_jiffies(waitfor)) {
521 long msecs = jiffies_to_msecs(waitfor - jiffies);
523 if (likely(msecs > 0))
531 #define mmc_blk_simulate_delay(mq, req, waitfor)
535 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
537 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
540 mutex_lock(&block_mutex);
543 check_disk_change(bdev);
546 if ((mode & FMODE_WRITE) && md->read_only) {
551 mutex_unlock(&block_mutex);
556 static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
558 struct mmc_blk_data *md = disk->private_data;
560 mutex_lock(&block_mutex);
562 mutex_unlock(&block_mutex);
566 mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
568 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
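/*
 * A fake 4-head, 16-sector-per-track geometry for legacy tools; e.g. an
 * 8 GiB card with 16777216 512-byte sectors reports 262144 cylinders.
 */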
574 struct mmc_blk_ioc_data {
575 struct mmc_ioc_cmd ic;
580 static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
581 struct mmc_ioc_cmd __user *user)
583 struct mmc_blk_ioc_data *idata;
586 idata = kzalloc(sizeof(*idata), GFP_KERNEL);
592 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
597 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
598 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
603 if (!idata->buf_bytes)
606 idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
612 if (copy_from_user(idata->buf, (void __user *)(unsigned long)
613 idata->ic.data_ptr, idata->buf_bytes)) {
628 static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
629 struct mmc_blk_ioc_data *idata)
631 struct mmc_ioc_cmd *ic = &idata->ic;
633 if (copy_to_user(&(ic_ptr->response), ic->response,
634 sizeof(ic->response)))
637 if (!idata->ic.write_flag) {
638 if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
639 idata->buf, idata->buf_bytes))
646 static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
652 if (!status || !retries_max)
656 err = get_card_status(card, status, 5);
660 if (!R1_STATUS(*status) &&
661 (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
662 break; /* RPMB programming operation complete */
665 * Reschedule to give the MMC device a chance to continue
666 * processing the previous command without being polled too
669 usleep_range(1000, 5000);
670 } while (++retry_count < retries_max);
672 if (retry_count == retries_max)
678 static int ioctl_do_sanitize(struct mmc_card *card)
682 if (!mmc_can_sanitize(card)) {
683 pr_warn("%s: %s - SANITIZE is not supported\n",
684 mmc_hostname(card->host), __func__);
689 pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
690 mmc_hostname(card->host), __func__);
692 trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
693 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
694 EXT_CSD_SANITIZE_START, 1,
695 MMC_SANITIZE_REQ_TIMEOUT);
696 trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
699 pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
700 mmc_hostname(card->host), __func__, err);
702 pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
708 static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
709 struct mmc_blk_ioc_data *idata)
711 struct mmc_command cmd = {0};
712 struct mmc_data data = {0};
713 struct mmc_request mrq = {NULL};
714 struct scatterlist sg;
719 if (!card || !md || !idata)
722 if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
725 cmd.opcode = idata->ic.opcode;
726 cmd.arg = idata->ic.arg;
727 cmd.flags = idata->ic.flags;
729 if (idata->buf_bytes) {
732 data.blksz = idata->ic.blksz;
733 data.blocks = idata->ic.blocks;
735 sg_init_one(data.sg, idata->buf, idata->buf_bytes);
737 if (idata->ic.write_flag)
738 data.flags = MMC_DATA_WRITE;
740 data.flags = MMC_DATA_READ;
742 /* data.flags must already be set before doing this. */
743 mmc_set_data_timeout(&data, card);
745 /* Allow overriding the timeout_ns for empirical tuning. */
746 if (idata->ic.data_timeout_ns)
747 data.timeout_ns = idata->ic.data_timeout_ns;
749 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
751 * Pretend this is a data transfer and rely on the
752 * host driver to compute timeout. When all host
753 * drivers support cmd.cmd_timeout for R1B, this
757 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
759 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
767 err = mmc_blk_part_switch(card, md);
771 if (idata->ic.is_acmd) {
772 err = mmc_app_cmd(card->host, card);
778 err = mmc_set_blockcount(card, data.blocks,
779 idata->ic.write_flag & (1 << 31));
784 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
785 (cmd.opcode == MMC_SWITCH)) {
786 err = ioctl_do_sanitize(card);
789 pr_err("%s: ioctl_do_sanitize() failed. err = %d",
795 mmc_wait_for_req(card->host, &mrq);
798 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
799 __func__, cmd.error);
803 dev_err(mmc_dev(card->host), "%s: data error %d\n",
804 __func__, data.error);
809 * According to the SD specs, some commands require a delay after
810 * issuing the command.
812 if (idata->ic.postsleep_min_us)
813 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
815 memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
819 * Ensure RPMB command has completed by polling CMD13
822 err = ioctl_rpmb_card_status_poll(card, &status, 5);
824 dev_err(mmc_dev(card->host),
825 "%s: Card Status=0x%08X, error %d\n",
826 __func__, status, err);
832 static int mmc_blk_ioctl_cmd(struct block_device *bdev,
833 struct mmc_ioc_cmd __user *ic_ptr)
835 struct mmc_blk_ioc_data *idata;
836 struct mmc_blk_data *md;
837 struct mmc_card *card;
838 int err = 0, ioc_err = 0;
841 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
842 * whole block device, not on a partition. This prevents overspray
843 * between sibling partitions.
845 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
848 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
850 return PTR_ERR(idata);
852 md = mmc_blk_get(bdev->bd_disk);
858 card = md->queue.card;
866 ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
870 err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
877 return ioc_err ? ioc_err : err;
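/*
 * For illustration, a minimal user-space sketch of this ioctl path: issue
 * CMD13 (SEND_STATUS) through MMC_IOC_CMD and print the R1 status.
 * struct mmc_ioc_cmd and MMC_IOC_CMD come from the uapi header
 * <linux/mmc/ioctl.h>; the opcode and response-flag values are not exported
 * to user space, so the sketch carries local copies of the definitions from
 * <linux/mmc/core.h> and <linux/mmc/mmc.h>, as mmc-utils does. The caller
 * needs CAP_SYS_RAWIO and must open the whole block device.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/mmc/ioctl.h>
 *
 *	#define MMC_SEND_STATUS		13
 *	#define MMC_RSP_PRESENT		(1 << 0)
 *	#define MMC_RSP_CRC		(1 << 2)
 *	#define MMC_RSP_OPCODE		(1 << 4)
 *	#define MMC_RSP_R1		(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
 *	#define MMC_CMD_AC		(0 << 5)
 *
 *	int main(void)
 *	{
 *		struct mmc_ioc_cmd idata;
 *		int fd = open("/dev/mmcblk0", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&idata, 0, sizeof(idata));
 *		idata.opcode = MMC_SEND_STATUS;
 *		idata.arg = 1 << 16;	// RCA in bits [31:16]; 1 is what the core assigns to eMMC
 *		idata.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *		if (ioctl(fd, MMC_IOC_CMD, &idata) == 0)
 *			printf("card status 0x%08x\n", idata.response[0]);
 *		close(fd);
 *		return 0;
 *	}
 */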
880 static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
881 struct mmc_ioc_multi_cmd __user *user)
883 struct mmc_blk_ioc_data **idata = NULL;
884 struct mmc_ioc_cmd __user *cmds = user->cmds;
885 struct mmc_card *card;
886 struct mmc_blk_data *md;
887 int i, err = 0, ioc_err = 0;
891 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
892 * whole block device, not on a partition. This prevents overspray
893 * between sibling partitions.
895 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
898 if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
899 sizeof(num_of_cmds)))
902 if (num_of_cmds > MMC_IOC_MAX_CMDS)
905 idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
909 for (i = 0; i < num_of_cmds; i++) {
910 idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
911 if (IS_ERR(idata[i])) {
912 err = PTR_ERR(idata[i]);
918 md = mmc_blk_get(bdev->bd_disk);
922 card = md->queue.card;
930 for (i = 0; i < num_of_cmds && !ioc_err; i++)
931 ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
935 /* copy responses (and any read data) back to user space */
936 for (i = 0; i < num_of_cmds && !err; i++)
937 err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
942 for (i = 0; i < num_of_cmds; i++) {
943 kfree(idata[i]->buf);
947 return ioc_err ? ioc_err : err;
950 static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
951 unsigned int cmd, unsigned long arg)
955 return mmc_blk_ioctl_cmd(bdev,
956 (struct mmc_ioc_cmd __user *)arg);
957 case MMC_IOC_MULTI_CMD:
958 return mmc_blk_ioctl_multi_cmd(bdev,
959 (struct mmc_ioc_multi_cmd __user *)arg);
966 static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
967 unsigned int cmd, unsigned long arg)
969 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
973 static const struct block_device_operations mmc_bdops = {
974 .open = mmc_blk_open,
975 .release = mmc_blk_release,
976 .getgeo = mmc_blk_getgeo,
977 .owner = THIS_MODULE,
978 .ioctl = mmc_blk_ioctl,
980 .compat_ioctl = mmc_blk_compat_ioctl,
984 static inline int mmc_blk_part_switch(struct mmc_card *card,
985 struct mmc_blk_data *md)
988 struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
990 if (main_md->part_curr == md->part_type)
993 if (mmc_card_mmc(card)) {
994 u8 part_config = card->ext_csd.part_config;
996 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
997 part_config |= md->part_type;
999 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1000 EXT_CSD_PART_CONFIG, part_config,
1001 card->ext_csd.part_time);
1005 card->ext_csd.part_config = part_config;
1008 main_md->part_curr = md->part_type;
1012 static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
1018 struct mmc_request mrq = {NULL};
1019 struct mmc_command cmd = {0};
1020 struct mmc_data data = {0};
1022 struct scatterlist sg;
1024 cmd.opcode = MMC_APP_CMD;
1025 cmd.arg = card->rca << 16;
1026 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1028 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1031 if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
1034 memset(&cmd, 0, sizeof(struct mmc_command));
1036 cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
1038 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1042 data.flags = MMC_DATA_READ;
1045 mmc_set_data_timeout(&data, card);
1050 blocks = kmalloc(4, GFP_KERNEL);
1054 sg_init_one(&sg, blocks, 4);
1056 mmc_wait_for_req(card->host, &mrq);
1058 result = ntohl(*blocks);
1061 if (cmd.error || data.error)
1067 static int get_card_status(struct mmc_card *card, u32 *status, int retries)
1069 struct mmc_command cmd = {0};
1072 cmd.opcode = MMC_SEND_STATUS;
1073 if (!mmc_host_is_spi(card->host))
1074 cmd.arg = card->rca << 16;
1075 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
1076 err = mmc_wait_for_cmd(card->host, &cmd, retries);
1078 *status = cmd.resp[0];
1082 static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
1083 bool hw_busy_detect, struct request *req, int *gen_err)
1085 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
1090 err = get_card_status(card, &status, 5);
1092 pr_err("%s: error %d requesting status\n",
1093 req->rq_disk->disk_name, err);
1097 if (status & R1_ERROR) {
1098 pr_err("%s: %s: error sending status cmd, status %#x\n",
1099 req->rq_disk->disk_name, __func__, status);
1103 /* We may rely on the host hw to handle busy detection. */
1104 if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
1109 * Timeout if the device never becomes ready for data and never
1110 * leaves the program state.
1112 if (time_after(jiffies, timeout)) {
1113 pr_err("%s: Card stuck in programming state! %s %s\n",
1114 mmc_hostname(card->host),
1115 req->rq_disk->disk_name, __func__);
1120 * Some cards mishandle the status bits,
1121 * so make sure to check both the busy
1122 * indication and the card state.
1124 } while (!(status & R1_READY_FOR_DATA) ||
1125 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
1130 static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
1131 struct request *req, int *gen_err, u32 *stop_status)
1133 struct mmc_host *host = card->host;
1134 struct mmc_command cmd = {0};
1136 bool use_r1b_resp = rq_data_dir(req) == WRITE;
1139 * Normally we use R1B responses for WRITE, but in cases where the host
1140 * has specified a max_busy_timeout we need to validate it. A failure
1141 * means we need to prevent the host from doing hw busy detection, which
1142 * is done by converting to an R1 response instead.
1144 if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
1145 use_r1b_resp = false;
1147 cmd.opcode = MMC_STOP_TRANSMISSION;
1149 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1150 cmd.busy_timeout = timeout_ms;
1152 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1155 err = mmc_wait_for_cmd(host, &cmd, 5);
1159 *stop_status = cmd.resp[0];
1161 /* No need to check card status in case of READ. */
1162 if (rq_data_dir(req) == READ)
1165 if (!mmc_host_is_spi(host) &&
1166 (*stop_status & R1_ERROR)) {
1167 pr_err("%s: %s: general error sending stop command, resp %#x\n",
1168 req->rq_disk->disk_name, __func__, *stop_status);
1172 return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
1175 #define ERR_NOMEDIUM 3
1178 #define ERR_CONTINUE 0
1180 static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
1181 bool status_valid, u32 status)
1185 /* response crc error, retry the r/w cmd */
1186 pr_err("%s: %s sending %s command, card status %#x\n",
1187 req->rq_disk->disk_name, "response CRC error",
1192 pr_err("%s: %s sending %s command, card status %#x\n",
1193 req->rq_disk->disk_name, "timed out", name, status);
1195 /* If the status cmd initially failed, retry the r/w cmd */
1196 if (!status_valid) {
1197 pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
1201 * If it was an r/w cmd CRC error, or an illegal command
1202 * (e.g., issued in the wrong state), then retry - we should
1203 * have corrected the state problem above.
1205 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
1206 pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
1210 /* Otherwise abort the command */
1211 pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
1215 /* We don't understand the error code the driver gave us */
1216 pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
1217 req->rq_disk->disk_name, error, status);
1223 * Initial r/w and stop cmd error recovery.
1224 * We don't know whether the card received the r/w cmd or not, so try to
1225 * restore things back to a sane state. Essentially, we do this as follows:
1226 * - Obtain card status. If the first attempt to obtain card status fails,
1227 * the status word will reflect the failed status cmd, not the failed
1228 * r/w cmd. If we fail to obtain card status, it suggests we can no
1229 * longer communicate with the card.
1230 * - Check the card state. If the card received the cmd but there was a
1231 * transient problem with the response, it might still be in a data transfer
1232 * mode. Try to send it a stop command. If this fails, we can't recover.
1233 * - If the r/w cmd failed due to a response CRC error, it was probably
1234 * transient, so retry the cmd.
1235 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
1236 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
1237 * illegal cmd, retry.
1238 * Otherwise we don't understand what happened, so abort.
1240 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
1241 struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
1243 bool prev_cmd_status_valid = true;
1244 u32 status, stop_status = 0;
1247 if (mmc_card_removed(card))
1248 return ERR_NOMEDIUM;
1251 * Try to get card status which indicates both the card state
1252 * and why there was no response. If the first attempt fails,
1253 * we can't be sure the returned status is for the r/w command.
1255 for (retry = 2; retry >= 0; retry--) {
1256 err = get_card_status(card, &status, 0);
1260 /* Re-tune if needed */
1261 mmc_retune_recheck(card->host);
1263 prev_cmd_status_valid = false;
1264 pr_err("%s: error %d sending status command, %sing\n",
1265 req->rq_disk->disk_name, err, retry ? "retry" : "abort");
1268 /* We couldn't get a response from the card. Give up. */
1270 /* Check if the card is removed */
1271 if (mmc_detect_card_removed(card->host))
1272 return ERR_NOMEDIUM;
1276 /* Flag ECC errors */
1277 if ((status & R1_CARD_ECC_FAILED) ||
1278 (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
1279 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
1282 /* Flag General errors */
1283 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1284 if ((status & R1_ERROR) ||
1285 (brq->stop.resp[0] & R1_ERROR)) {
1286 pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
1287 req->rq_disk->disk_name, __func__,
1288 brq->stop.resp[0], status);
1293 * Check the current card state. If it is in some data transfer
1294 * mode, tell it to stop (and hopefully transition back to TRAN).
1296 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
1297 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
1298 err = send_stop(card,
1299 DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
1300 req, gen_err, &stop_status);
1302 pr_err("%s: error %d sending stop command\n",
1303 req->rq_disk->disk_name, err);
1305 * If the stop cmd also timed out, the card is probably
1306 * not present, so abort. Other errors are bad news too.
1311 if (stop_status & R1_CARD_ECC_FAILED)
1315 /* Check for set block count errors */
1317 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
1318 prev_cmd_status_valid, status);
1320 /* Check for r/w command errors */
1322 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
1323 prev_cmd_status_valid, status);
1326 if (!brq->stop.error)
1327 return ERR_CONTINUE;
1329 /* Now for stop errors. These aren't fatal to the transfer. */
1330 pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
1331 req->rq_disk->disk_name, brq->stop.error,
1332 brq->cmd.resp[0], status);
1335 * Substitute in our own stop status as this will give the error
1336 * state which happened during the execution of the r/w command.
1339 brq->stop.resp[0] = stop_status;
1340 brq->stop.error = 0;
1342 return ERR_CONTINUE;
1345 static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1350 if (md->reset_done & type)
1353 md->reset_done |= type;
1354 err = mmc_hw_reset(host);
1355 /* Ensure we switch back to the correct partition */
1356 if (err != -EOPNOTSUPP) {
1357 struct mmc_blk_data *main_md =
1358 dev_get_drvdata(&host->card->dev);
1361 main_md->part_curr = main_md->part_type;
1362 part_err = mmc_blk_part_switch(host->card, md);
1365 * We have failed to get back into the correct
1366 * partition, so we need to abort the whole request.
1374 static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1376 md->reset_done &= ~type;
1379 int mmc_access_rpmb(struct mmc_queue *mq)
1381 struct mmc_blk_data *md = mq->data;
1383 * If this is an RPMB partition access, return true
1385 if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1391 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1393 struct mmc_blk_data *md = mq->data;
1394 struct mmc_card *card = md->queue.card;
1395 unsigned int from, nr, arg;
1396 int err = 0, type = MMC_BLK_DISCARD;
1398 if (!mmc_can_erase(card)) {
1403 from = blk_rq_pos(req);
1404 nr = blk_rq_sectors(req);
1406 if (mmc_can_discard(card))
1407 arg = MMC_DISCARD_ARG;
1408 else if (mmc_can_trim(card))
1411 arg = MMC_ERASE_ARG;
1413 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1414 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1415 INAND_CMD38_ARG_EXT_CSD,
1416 arg == MMC_TRIM_ARG ?
1417 INAND_CMD38_ARG_TRIM :
1418 INAND_CMD38_ARG_ERASE,
1423 err = mmc_erase(card, from, nr, arg);
1425 if (err == -EIO && !mmc_blk_reset(md, card->host, type))
1428 mmc_blk_reset_success(md, type);
1429 blk_end_request(req, err, blk_rq_bytes(req));
1434 static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
1435 struct request *req)
1437 struct mmc_blk_data *md = mq->data;
1438 struct mmc_card *card = md->queue.card;
1439 unsigned int from, nr, arg;
1440 int err = 0, type = MMC_BLK_SECDISCARD;
1442 if (!(mmc_can_secure_erase_trim(card))) {
1447 from = blk_rq_pos(req);
1448 nr = blk_rq_sectors(req);
1450 if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
1451 arg = MMC_SECURE_TRIM1_ARG;
1453 arg = MMC_SECURE_ERASE_ARG;
1456 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1457 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1458 INAND_CMD38_ARG_EXT_CSD,
1459 arg == MMC_SECURE_TRIM1_ARG ?
1460 INAND_CMD38_ARG_SECTRIM1 :
1461 INAND_CMD38_ARG_SECERASE,
1467 err = mmc_erase(card, from, nr, arg);
1473 if (arg == MMC_SECURE_TRIM1_ARG) {
1474 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1475 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1476 INAND_CMD38_ARG_EXT_CSD,
1477 INAND_CMD38_ARG_SECTRIM2,
1483 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
1491 if (err && !mmc_blk_reset(md, card->host, type))
1494 mmc_blk_reset_success(md, type);
1496 blk_end_request(req, err, blk_rq_bytes(req));
1501 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
1503 struct mmc_blk_data *md = mq->data;
1504 struct mmc_card *card = md->queue.card;
1507 ret = mmc_flush_cache(card);
1511 #ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
1512 else if (atomic_read(&mq->cache_size)) {
1513 long used = mmc_blk_cache_used(mq, jiffies);
1516 int speed = atomic_read(&mq->max_write_speed);
1518 if (speed_valid(speed)) {
1519 unsigned long msecs = jiffies_to_msecs(
1520 size_and_speed_to_jiffies(
1528 blk_end_request_all(req, ret);
1534 * Reformat the current write as a reliable write, supporting
1535 * both legacy and the enhanced reliable write MMC cards.
1536 * In each transfer we'll handle only as much as a single
1537 * reliable write can handle, thus finishing the request in
1538 * partial completions.
1540 static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
1541 struct mmc_card *card,
1542 struct request *req)
1544 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
1545 /* Legacy mode imposes restrictions on transfers. */
1546 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
1547 brq->data.blocks = 1;
1549 if (brq->data.blocks > card->ext_csd.rel_sectors)
1550 brq->data.blocks = card->ext_csd.rel_sectors;
1551 else if (brq->data.blocks < card->ext_csd.rel_sectors)
1552 brq->data.blocks = 1;
1556 #define CMD_ERRORS \
1557 (R1_OUT_OF_RANGE | /* Command argument out of range */ \
1558 R1_ADDRESS_ERROR | /* Misaligned address */ \
1559 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
1560 R1_WP_VIOLATION | /* Tried to write to protected block */ \
1561 R1_CC_ERROR | /* Card controller error */ \
1562 R1_ERROR) /* General/unknown error */
1564 static int mmc_blk_err_check(struct mmc_card *card,
1565 struct mmc_async_req *areq)
1567 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
1569 struct mmc_blk_request *brq = &mq_mrq->brq;
1570 struct request *req = mq_mrq->req;
1571 int need_retune = card->host->need_retune;
1572 int ecc_err = 0, gen_err = 0;
1575 * sbc.error indicates a problem with the set block count
1576 * command. No data will have been transferred.
1578 * cmd.error indicates a problem with the r/w command. No
1579 * data will have been transferred.
1581 * stop.error indicates a problem with the stop command. Data
1582 * may have been transferred, or may still be transferring.
1584 if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1586 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
1588 return MMC_BLK_RETRY;
1590 return MMC_BLK_ABORT;
1592 return MMC_BLK_NOMEDIUM;
1599 * Check for errors relating to the execution of the
1600 * initial command - such as address errors. No data
1601 * has been transferred.
1603 if (brq->cmd.resp[0] & CMD_ERRORS) {
1604 pr_err("%s: r/w command failed, status = %#x\n",
1605 req->rq_disk->disk_name, brq->cmd.resp[0]);
1606 return MMC_BLK_ABORT;
1610 * Everything else is either success, or a data error of some
1611 * kind. If it was a write, we may have transitioned to
1612 * program mode, which we have to wait for to complete.
1614 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1617 /* Check stop command response */
1618 if (brq->stop.resp[0] & R1_ERROR) {
1619 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1620 req->rq_disk->disk_name, __func__,
1625 err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
1628 return MMC_BLK_CMD_ERR;
1631 /* if general error occurs, retry the write operation. */
1633 pr_warn("%s: retrying write for general error\n",
1634 req->rq_disk->disk_name);
1635 return MMC_BLK_RETRY;
1638 if (brq->data.error) {
1639 if (need_retune && !brq->retune_retry_done) {
1640 pr_info("%s: retrying because a re-tune was needed\n",
1641 req->rq_disk->disk_name);
1642 brq->retune_retry_done = 1;
1643 return MMC_BLK_RETRY;
1645 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1646 req->rq_disk->disk_name, brq->data.error,
1647 (unsigned)blk_rq_pos(req),
1648 (unsigned)blk_rq_sectors(req),
1649 brq->cmd.resp[0], brq->stop.resp[0]);
1651 if (rq_data_dir(req) == READ) {
1653 return MMC_BLK_ECC_ERR;
1654 return MMC_BLK_DATA_ERR;
1656 return MMC_BLK_CMD_ERR;
1660 if (!brq->data.bytes_xfered)
1661 return MMC_BLK_RETRY;
1663 if (mmc_packed_cmd(mq_mrq->cmd_type)) {
1664 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
1665 return MMC_BLK_PARTIAL;
1667 return MMC_BLK_SUCCESS;
1670 if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1671 return MMC_BLK_PARTIAL;
1673 return MMC_BLK_SUCCESS;
1676 static int mmc_blk_packed_err_check(struct mmc_card *card,
1677 struct mmc_async_req *areq)
1679 struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
1681 struct request *req = mq_rq->req;
1682 struct mmc_packed *packed = mq_rq->packed;
1683 int err, check, status;
1689 check = mmc_blk_err_check(card, areq);
1690 err = get_card_status(card, &status, 0);
1692 pr_err("%s: error %d sending status command\n",
1693 req->rq_disk->disk_name, err);
1694 return MMC_BLK_ABORT;
1697 if (status & R1_EXCEPTION_EVENT) {
1698 err = mmc_get_ext_csd(card, &ext_csd);
1700 pr_err("%s: error %d sending ext_csd\n",
1701 req->rq_disk->disk_name, err);
1702 return MMC_BLK_ABORT;
1705 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
1706 EXT_CSD_PACKED_FAILURE) &&
1707 (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1708 EXT_CSD_PACKED_GENERIC_ERROR)) {
1709 if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1710 EXT_CSD_PACKED_INDEXED_ERROR) {
1711 packed->idx_failure =
1712 ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
1713 check = MMC_BLK_PARTIAL;
1715 pr_err("%s: packed cmd failed, nr %u, sectors %u, "
1716 "failure index: %d\n",
1717 req->rq_disk->disk_name, packed->nr_entries,
1718 packed->blocks, packed->idx_failure);
1726 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1727 struct mmc_card *card,
1729 struct mmc_queue *mq)
1731 u32 readcmd, writecmd;
1732 struct mmc_blk_request *brq = &mqrq->brq;
1733 struct request *req = mqrq->req;
1734 struct mmc_blk_data *md = mq->data;
1738 * Reliable writes are used to implement Forced Unit Access and
1739 * are supported only on MMCs.
1741 bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
1742 (rq_data_dir(req) == WRITE) &&
1743 (md->flags & MMC_BLK_REL_WR);
1745 memset(brq, 0, sizeof(struct mmc_blk_request));
1746 brq->mrq.cmd = &brq->cmd;
1747 brq->mrq.data = &brq->data;
1749 brq->cmd.arg = blk_rq_pos(req);
1750 if (!mmc_card_blockaddr(card))
1752 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1753 brq->data.blksz = 512;
1754 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1756 brq->data.blocks = blk_rq_sectors(req);
1759 * The block layer doesn't support all sector count
1760 * restrictions, so we need to be prepared for too big
1763 if (brq->data.blocks > card->host->max_blk_count)
1764 brq->data.blocks = card->host->max_blk_count;
1766 if (brq->data.blocks > 1) {
1768 * After a read error, we redo the request one sector
1769 * at a time in order to accurately determine which
1770 * sectors can be read successfully.
1773 brq->data.blocks = 1;
1776 * Some controllers have HW issues while operating
1777 * in multiple I/O mode
1779 if (card->host->ops->multi_io_quirk)
1780 brq->data.blocks = card->host->ops->multi_io_quirk(card,
1781 (rq_data_dir(req) == READ) ?
1782 MMC_DATA_READ : MMC_DATA_WRITE,
1786 if (brq->data.blocks > 1 || do_rel_wr) {
1787 /* SPI multiblock writes terminate using a special
1788 * token, not a STOP_TRANSMISSION request.
1790 if (!mmc_host_is_spi(card->host) ||
1791 rq_data_dir(req) == READ)
1792 brq->mrq.stop = &brq->stop;
1793 readcmd = MMC_READ_MULTIPLE_BLOCK;
1794 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1796 brq->mrq.stop = NULL;
1797 readcmd = MMC_READ_SINGLE_BLOCK;
1798 writecmd = MMC_WRITE_BLOCK;
1800 if (rq_data_dir(req) == READ) {
1801 brq->cmd.opcode = readcmd;
1802 brq->data.flags |= MMC_DATA_READ;
1804 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
1807 brq->cmd.opcode = writecmd;
1808 brq->data.flags |= MMC_DATA_WRITE;
1810 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
1815 mmc_apply_rel_rw(brq, card, req);
1818 * Data tag is used only when writing metadata, to speed
1819 * up the write and any subsequent read of this metadata
1821 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1822 (req->cmd_flags & REQ_META) &&
1823 (rq_data_dir(req) == WRITE) &&
1824 ((brq->data.blocks * brq->data.blksz) >=
1825 card->ext_csd.data_tag_unit_size);
1828 * Pre-defined multi-block transfers are preferable to
1829 * open-ended ones (and necessary for reliable writes).
1830 * However, it is not sufficient to just send CMD23,
1831 * and avoid the final CMD12, as on an error condition
1832 * CMD12 (stop) needs to be sent anyway. This, coupled
1833 * with Auto-CMD23 enhancements provided by some
1834 * hosts, means that the complexity of dealing
1835 * with this is best left to the host. If CMD23 is
1836 * supported by card and host, we'll fill sbc in and let
1837 * the host deal with handling it correctly. This means
1838 * that for hosts that don't expose MMC_CAP_CMD23, no
1839 * change of behavior will be observed.
1841 * N.B.: Some MMC cards experience performance degradation.
1842 * We'll avoid using CMD23-bounded multiblock writes for
1843 * these, while retaining features like reliable writes.
1845 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1846 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1848 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1849 brq->sbc.arg = brq->data.blocks |
1850 (do_rel_wr ? (1 << 31) : 0) |
1851 (do_data_tag ? (1 << 29) : 0);
1852 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1853 brq->mrq.sbc = &brq->sbc;
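/*
 * e.g. a 64-sector reliable write yields sbc.arg == 0x80000040:
 * bit 31 requests a reliable write, bits [15:0] carry the block count.
 */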
1856 mmc_set_data_timeout(&brq->data, card);
1858 brq->data.sg = mqrq->sg;
1859 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1862 * Adjust the sg list so it is the same size as the
1865 if (brq->data.blocks != blk_rq_sectors(req)) {
1866 int i, data_size = brq->data.blocks << 9;
1867 struct scatterlist *sg;
1869 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1870 data_size -= sg->length;
1871 if (data_size <= 0) {
1872 sg->length += data_size;
1877 brq->data.sg_len = i;
1880 mqrq->mmc_active.mrq = &brq->mrq;
1881 mqrq->mmc_active.err_check = mmc_blk_err_check;
1883 mmc_queue_bounce_pre(mqrq);
1886 static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
1887 struct mmc_card *card)
1889 unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
1890 unsigned int max_seg_sz = queue_max_segment_size(q);
1891 unsigned int len, nr_segs = 0;
1894 len = min(hdr_sz, max_seg_sz);
1902 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1904 struct request_queue *q = mq->queue;
1905 struct mmc_card *card = mq->card;
1906 struct request *cur = req, *next = NULL;
1907 struct mmc_blk_data *md = mq->data;
1908 struct mmc_queue_req *mqrq = mq->mqrq_cur;
1909 bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
1910 unsigned int req_sectors = 0, phys_segments = 0;
1911 unsigned int max_blk_count, max_phys_segs;
1912 bool put_back = true;
1913 u8 max_packed_rw = 0;
1916 if (!(md->flags & MMC_BLK_PACKED_CMD))
1919 if ((rq_data_dir(cur) == WRITE) &&
1920 mmc_host_packed_wr(card->host))
1921 max_packed_rw = card->ext_csd.max_packed_writes;
1923 if (max_packed_rw == 0)
1926 if (mmc_req_rel_wr(cur) &&
1927 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1930 if (mmc_large_sector(card) &&
1931 !IS_ALIGNED(blk_rq_sectors(cur), 8))
1934 mmc_blk_clear_packed(mqrq);
1936 max_blk_count = min(card->host->max_blk_count,
1937 card->host->max_req_size >> 9);
1938 if (unlikely(max_blk_count > 0xffff))
1939 max_blk_count = 0xffff;
1941 max_phys_segs = queue_max_segments(q);
1942 req_sectors += blk_rq_sectors(cur);
1943 phys_segments += cur->nr_phys_segments;
1945 if (rq_data_dir(cur) == WRITE) {
1946 req_sectors += mmc_large_sector(card) ? 8 : 1;
1947 phys_segments += mmc_calc_packed_hdr_segs(q, card);
1951 if (reqs >= max_packed_rw - 1) {
1956 spin_lock_irq(q->queue_lock);
1957 next = blk_fetch_request(q);
1958 spin_unlock_irq(q->queue_lock);
1964 if (mmc_large_sector(card) &&
1965 !IS_ALIGNED(blk_rq_sectors(next), 8))
1968 if (next->cmd_flags & REQ_DISCARD ||
1969 next->cmd_flags & REQ_FLUSH)
1972 if (rq_data_dir(cur) != rq_data_dir(next))
1975 if (mmc_req_rel_wr(next) &&
1976 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1979 req_sectors += blk_rq_sectors(next);
1980 if (req_sectors > max_blk_count)
1983 phys_segments += next->nr_phys_segments;
1984 if (phys_segments > max_phys_segs)
1987 list_add_tail(&next->queuelist, &mqrq->packed->list);
1993 spin_lock_irq(q->queue_lock);
1994 blk_requeue_request(q, next);
1995 spin_unlock_irq(q->queue_lock);
1999 list_add(&req->queuelist, &mqrq->packed->list);
2000 mqrq->packed->nr_entries = ++reqs;
2001 mqrq->packed->retries = reqs;
2006 mqrq->cmd_type = MMC_PACKED_NONE;
2010 static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
2011 struct mmc_card *card,
2012 struct mmc_queue *mq)
2014 struct mmc_blk_request *brq = &mqrq->brq;
2015 struct request *req = mqrq->req;
2016 struct request *prq;
2017 struct mmc_blk_data *md = mq->data;
2018 struct mmc_packed *packed = mqrq->packed;
2019 bool do_rel_wr, do_data_tag;
2020 u32 *packed_cmd_hdr;
2026 mqrq->cmd_type = MMC_PACKED_WRITE;
2028 packed->idx_failure = MMC_PACKED_NR_IDX;
2030 packed_cmd_hdr = packed->cmd_hdr;
2031 memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
2032 packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
2033 (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
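/*
 * e.g. three packed writes produce a first header word of
 * (3 << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER == 0x00030201,
 * stored little-endian on the medium.
 */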
2034 hdr_blocks = mmc_large_sector(card) ? 8 : 1;
2037 * Argument for each entry of packed group
2039 list_for_each_entry(prq, &packed->list, queuelist) {
2040 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
2041 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
2042 (prq->cmd_flags & REQ_META) &&
2043 (rq_data_dir(prq) == WRITE) &&
2044 ((brq->data.blocks * brq->data.blksz) >=
2045 card->ext_csd.data_tag_unit_size);
2046 /* Argument of CMD23 */
2047 packed_cmd_hdr[(i * 2)] = cpu_to_le32(
2048 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
2049 (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
2050 blk_rq_sectors(prq));
2051 /* Argument of CMD18 or CMD25 */
2052 packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
2053 mmc_card_blockaddr(card) ?
2054 blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
2055 packed->blocks += blk_rq_sectors(prq);
2059 memset(brq, 0, sizeof(struct mmc_blk_request));
2060 brq->mrq.cmd = &brq->cmd;
2061 brq->mrq.data = &brq->data;
2062 brq->mrq.sbc = &brq->sbc;
2063 brq->mrq.stop = &brq->stop;
2065 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
2066 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
2067 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
2069 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
2070 brq->cmd.arg = blk_rq_pos(req);
2071 if (!mmc_card_blockaddr(card))
2073 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
2075 brq->data.blksz = 512;
2076 brq->data.blocks = packed->blocks + hdr_blocks;
2077 brq->data.flags |= MMC_DATA_WRITE;
2079 brq->stop.opcode = MMC_STOP_TRANSMISSION;
2081 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2083 mmc_set_data_timeout(&brq->data, card);
2085 brq->data.sg = mqrq->sg;
2086 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
2088 mqrq->mmc_active.mrq = &brq->mrq;
2089 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
2091 mmc_queue_bounce_pre(mqrq);
2094 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
2095 struct mmc_blk_request *brq, struct request *req,
2098 struct mmc_queue_req *mq_rq;
2099 mq_rq = container_of(brq, struct mmc_queue_req, brq);
2102 * If this is an SD card and we're writing, we can first
2103 * mark the known good sectors as ok.
2105 * If the card is not SD, we can still mark as ok the sectors
2106 * reported written by the controller (which might be fewer than
2107 * the real number of written sectors, but never more).
2109 if (mmc_card_sd(card)) {
2112 blocks = mmc_sd_num_wr_blocks(card);
2113 if (blocks != (u32)-1) {
2114 ret = blk_end_request(req, 0, blocks << 9);
2117 if (!mmc_packed_cmd(mq_rq->cmd_type))
2118 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
2123 static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
2125 struct request *prq;
2126 struct mmc_packed *packed = mq_rq->packed;
2127 int idx = packed->idx_failure, i = 0;
2132 while (!list_empty(&packed->list)) {
2133 prq = list_entry_rq(packed->list.next);
2135 /* retry from error index */
2136 packed->nr_entries -= idx;
2140 if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
2141 list_del_init(&prq->queuelist);
2142 mmc_blk_clear_packed(mq_rq);
2146 list_del_init(&prq->queuelist);
2147 blk_end_request(prq, 0, blk_rq_bytes(prq));
2151 mmc_blk_clear_packed(mq_rq);
2155 static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
2157 struct request *prq;
2158 struct mmc_packed *packed = mq_rq->packed;
2162 while (!list_empty(&packed->list)) {
2163 prq = list_entry_rq(packed->list.next);
2164 list_del_init(&prq->queuelist);
2165 blk_end_request(prq, -EIO, blk_rq_bytes(prq));
2168 mmc_blk_clear_packed(mq_rq);
2171 static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
2172 struct mmc_queue_req *mq_rq)
2174 struct request *prq;
2175 struct request_queue *q = mq->queue;
2176 struct mmc_packed *packed = mq_rq->packed;
2180 while (!list_empty(&packed->list)) {
2181 prq = list_entry_rq(packed->list.prev);
2182 if (prq->queuelist.prev != &packed->list) {
2183 list_del_init(&prq->queuelist);
2184 spin_lock_irq(q->queue_lock);
2185 blk_requeue_request(mq->queue, prq);
2186 spin_unlock_irq(q->queue_lock);
2188 list_del_init(&prq->queuelist);
2192 mmc_blk_clear_packed(mq_rq);
2195 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
2197 struct mmc_blk_data *md = mq->data;
2198 struct mmc_card *card = md->queue.card;
2199 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
2200 int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
2201 enum mmc_blk_status status;
2202 struct mmc_queue_req *mq_rq;
2203 struct request *req = rqc;
2204 struct mmc_async_req *areq;
2205 const u8 packed_nr = 2;
2207 #ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
2208 unsigned long waitfor = jiffies;
2211 if (!rqc && !mq->mqrq_prev->req)
2215 reqs = mmc_blk_prep_packed_list(mq, rqc);
2220 * When the 4KB native sector size is enabled, only reads and
2221 * writes that are a multiple of 8 blocks are allowed
2223 if ((brq->data.blocks & 0x07) &&
2224 (card->ext_csd.data_sector_size == 4096)) {
2225 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
2226 req->rq_disk->disk_name);
2227 mq_rq = mq->mqrq_cur;
2231 if (reqs >= packed_nr)
2232 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
2235 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2236 areq = &mq->mqrq_cur->mmc_active;
2239 areq = mmc_start_req(card->host, areq, (int *) &status);
2241 if (status == MMC_BLK_NEW_REQUEST)
2242 mq->flags |= MMC_QUEUE_NEW_REQUEST;
2246 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
2249 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
2250 mmc_queue_bounce_post(mq_rq);
2253 case MMC_BLK_SUCCESS:
2254 case MMC_BLK_PARTIAL:
2256 * A block was successfully transferred.
2258 mmc_blk_reset_success(md, type);
2260 mmc_blk_simulate_delay(mq, rqc, waitfor);
2262 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2263 ret = mmc_blk_end_packed_req(mq_rq);
2266 ret = blk_end_request(req, 0,
2267 brq->data.bytes_xfered);
2271 * If the blk_end_request function returns non-zero even
2272 * though all data has been transferred and no errors
2273 * were returned by the host controller, it's a bug.
2275 if (status == MMC_BLK_SUCCESS && ret) {
2276 pr_err("%s BUG rq_tot %d d_xfer %d\n",
2277 __func__, blk_rq_bytes(req),
2278 brq->data.bytes_xfered);
2283 case MMC_BLK_CMD_ERR:
2284 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
2285 if (mmc_blk_reset(md, card->host, type))
2291 retune_retry_done = brq->retune_retry_done;
2296 if (!mmc_blk_reset(md, card->host, type))
2299 case MMC_BLK_DATA_ERR: {
2302 err = mmc_blk_reset(md, card->host, type);
2305 if (err == -ENODEV ||
2306 mmc_packed_cmd(mq_rq->cmd_type))
2310 case MMC_BLK_ECC_ERR:
2311 if (brq->data.blocks > 1) {
2312 /* Redo read one sector at a time */
2313 pr_warn("%s: retrying using single block read\n",
2314 req->rq_disk->disk_name);
2319 * After an error, we redo I/O one sector at a
2320 * time, so we only reach here after trying to
2321 * read a single sector.
2323 ret = blk_end_request(req, -EIO,
2328 case MMC_BLK_NOMEDIUM:
2331 pr_err("%s: Unhandled return value (%d)",
2332 req->rq_disk->disk_name, status);
2337 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2338 if (!mq_rq->packed->retries)
2340 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
2341 mmc_start_req(card->host,
2342 &mq_rq->mmc_active, NULL);
2346 * In case of an incomplete request
2347 * prepare it again and resend.
2349 mmc_blk_rw_rq_prep(mq_rq, card,
2351 mmc_start_req(card->host,
2352 &mq_rq->mmc_active, NULL);
2354 mq_rq->brq.retune_retry_done = retune_retry_done;
2361 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2362 mmc_blk_abort_packed_req(mq_rq);
2364 if (mmc_card_removed(card))
2365 req->cmd_flags |= REQ_QUIET;
2367 ret = blk_end_request(req, -EIO,
2368 blk_rq_cur_bytes(req));
2373 if (mmc_card_removed(card)) {
2374 rqc->cmd_flags |= REQ_QUIET;
2375 blk_end_request_all(rqc, -EIO);
2378 * If the current request is packed, it needs to be put back.
2380 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
2381 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
2383 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2384 mmc_start_req(card->host,
2385 &mq->mqrq_cur->mmc_active, NULL);
2392 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2395 struct mmc_blk_data *md = mq->data;
2396 struct mmc_card *card = md->queue.card;
2397 struct mmc_host *host = card->host;
2398 unsigned long flags;
2399 unsigned int cmd_flags = req ? req->cmd_flags : 0;
2401 if (req && !mq->mqrq_prev->req)
2402 /* claim host only for the first request */
2405 ret = mmc_blk_part_switch(card, md);
2408 blk_end_request_all(req, -EIO);
2414 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
2415 if (cmd_flags & REQ_DISCARD) {
2416 /* complete ongoing async transfer before issuing discard */
2417 if (card->host->areq)
2418 mmc_blk_issue_rw_rq(mq, NULL);
2419 if (req->cmd_flags & REQ_SECURE)
2420 ret = mmc_blk_issue_secdiscard_rq(mq, req);
2422 ret = mmc_blk_issue_discard_rq(mq, req);
2423 } else if (cmd_flags & REQ_FLUSH) {
2424 /* complete ongoing async transfer before issuing flush */
2425 if (card->host->areq)
2426 mmc_blk_issue_rw_rq(mq, NULL);
2427 ret = mmc_blk_issue_flush(mq, req);
2429 if (!req && host->areq) {
2430 spin_lock_irqsave(&host->context_info.lock, flags);
2431 host->context_info.is_waiting_last_req = true;
2432 spin_unlock_irqrestore(&host->context_info.lock, flags);
2434 ret = mmc_blk_issue_rw_rq(mq, req);
2438 if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
2439 (cmd_flags & MMC_REQ_SPECIAL_MASK))
2441 * Release the host when there are no more requests
2442 * and after a special request (discard, flush) is done.
2443 * In the case of a special request there is no reentry to
2444 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
2450 static inline int mmc_blk_readonly(struct mmc_card *card)
2452 return mmc_card_readonly(card) ||
2453 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2456 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2457 struct device *parent,
2460 const char *subname,
2463 struct mmc_blk_data *md;
2466 devidx = find_first_zero_bit(dev_use, max_devices);
2467 if (devidx >= max_devices)
2468 return ERR_PTR(-ENOSPC);
2469 __set_bit(devidx, dev_use);
2471 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
2478 * !subname implies we are creating the main mmc_blk_data that will be
2479 * associated with the mmc_card via dev_set_drvdata. Due to device
2480 * partitions, devidx will no longer coincide with a per-physical-card
2481 * index, so we keep track of a separate name index.
2484 md->name_idx = find_first_zero_bit(name_use, max_devices);
2485 __set_bit(md->name_idx, name_use);
2487 md->name_idx = ((struct mmc_blk_data *)
2488 dev_to_disk(parent)->private_data)->name_idx;
2490 md->area_type = area_type;
2493 * Set the read-only status based on the supported commands
2494 * and the write protect switch.
2496 md->read_only = mmc_blk_readonly(card);
2498 md->disk = alloc_disk(perdev_minors);
2499 if (md->disk == NULL) {
2504 spin_lock_init(&md->lock);
2505 INIT_LIST_HEAD(&md->part);
2508 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
2512 md->queue.issue_fn = mmc_blk_issue_rq;
2513 md->queue.data = md;
2515 md->disk->major = MMC_BLOCK_MAJOR;
2516 md->disk->first_minor = devidx * perdev_minors;
2517 md->disk->fops = &mmc_bdops;
2518 md->disk->private_data = md;
2519 md->disk->queue = md->queue.queue;
2520 md->disk->driverfs_dev = parent;
2521 set_disk_ro(md->disk, md->read_only || default_ro);
2522 md->disk->flags = GENHD_FL_EXT_DEVT;
2523 if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
2524 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
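/*
 * A note on the flag above, based on general block-layer semantics rather
 * than anything specific to this driver: GENHD_FL_NO_PART_SCAN stops the
 * block layer from probing for a partition table on the boot and RPMB
 * areas. These are small, special-purpose regions (RPMB in particular is
 * meant to be driven through the ioctl path with authenticated frames), so
 * scanning them is pointless and can produce confusing read errors.
 */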
2527 * As discussed on lkml, GENHD_FL_REMOVABLE should:
2529 * - be set for removable media with permanent block devices
2530 * - be unset for removable block devices with permanent media
2532 * Since MMC block devices clearly fall under the second
2533 * case, we do not set GENHD_FL_REMOVABLE. Userspace
2534 * should use the block device creation/destruction hotplug
2535 * messages to tell when the card is present.
2538 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2539 "mmcblk%u%s", md->name_idx, subname ? subname : "");
2541 if (mmc_card_mmc(card))
2542 blk_queue_logical_block_size(md->queue.queue,
2543 card->ext_csd.data_sector_size);
2545 blk_queue_logical_block_size(md->queue.queue, 512);
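/*
 * Background on the choice above (general eMMC behaviour, nothing beyond
 * the field already tested): eMMC 4.5+ parts formatted with 4KB native
 * sectors report ext_csd.data_sector_size == 4096, so the request queue is
 * told the true logical block size; everything else, including SD cards,
 * stays at the traditional 512 bytes.
 */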
2547 set_capacity(md->disk, size);
2549 if (mmc_host_cmd23(card->host)) {
2550 if (mmc_card_mmc(card) ||
2551 (mmc_card_sd(card) &&
2552 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2553 md->flags |= MMC_BLK_CMD23;
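/*
 * CMD23 (SET_BLOCK_COUNT) is only used when both sides advertise it:
 * mmc_host_cmd23() checks the host's MMC_CAP_CMD23 capability, eMMC parts
 * are assumed to implement the command (with the quirk table further down
 * blacklisting parts that misbehave), and SD cards opt in through the
 * SD_SCR_CMD23_SUPPORT bit read from their SCR register.
 */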
2556 if (mmc_card_mmc(card) &&
2557 md->flags & MMC_BLK_CMD23 &&
2558 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2559 card->ext_csd.rel_sectors)) {
2560 md->flags |= MMC_BLK_REL_WR;
2561 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
2564 if (mmc_card_mmc(card) &&
2565 (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2566 (md->flags & MMC_BLK_CMD23) &&
2567 card->ext_csd.packed_event_en) {
2568 if (!mmc_packed_init(&md->queue, card))
2569 md->flags |= MMC_BLK_PACKED_CMD;
2579 return ERR_PTR(ret);
2582 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2586 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2588 * The EXT_CSD sector count is in units of 512-byte sectors.
2591 size = card->ext_csd.sectors;
2594 * The CSD capacity field is in units of read_blkbits.
2595 * set_capacity takes units of 512 bytes.
2597 size = (typeof(sector_t))card->csd.capacity
2598 << (card->csd.read_blkbits - 9);
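/*
 * Worked example of the conversion above (illustrative numbers, not taken
 * from any particular card): a CSD advertising read_blkbits = 10 describes
 * capacity in 1024-byte blocks, so each block is two 512-byte sectors and
 * the shift is (10 - 9) = 1; a csd.capacity of 0x1F400 (128000 blocks)
 * therefore becomes 256000 sectors, i.e. 125 MiB.
 */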
2601 return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2602 MMC_BLK_DATA_AREA_MAIN);
2605 static int mmc_blk_alloc_part(struct mmc_card *card,
2606 struct mmc_blk_data *md,
2607 unsigned int part_type,
2610 const char *subname,
2614 struct mmc_blk_data *part_md;
2616 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
2617 subname, area_type);
2618 if (IS_ERR(part_md))
2619 return PTR_ERR(part_md);
2620 part_md->part_type = part_type;
2621 list_add(&part_md->part, &md->part);
2623 string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
2624 cap_str, sizeof(cap_str));
2625 pr_info("%s: %s %s partition %u %s\n",
2626 part_md->disk->disk_name, mmc_card_id(card),
2627 mmc_card_name(card), part_md->part_type, cap_str);
2631 /* MMC Physical partitions consist of two boot partitions and
2632 * up to four general purpose partitions.
2633 * For each partition enabled in EXT_CSD a block device will be allocated
2634 * to provide access to the partition.
2637 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2641 if (!mmc_card_mmc(card))
2644 for (idx = 0; idx < card->nr_parts; idx++) {
2645 if (card->part[idx].size) {
2646 ret = mmc_blk_alloc_part(card, md,
2647 card->part[idx].part_cfg,
2648 card->part[idx].size >> 9,
2649 card->part[idx].force_ro,
2650 card->part[idx].name,
2651 card->part[idx].area_type);
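/*
 * card->part[] is populated by the core during card initialisation from the
 * EXT_CSD partition fields; each entry's size is in bytes, hence the ">> 9"
 * above to produce the 512-byte sector count that mmc_blk_alloc_part()
 * ultimately hands to set_capacity().
 */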
2660 static void mmc_blk_remove_req(struct mmc_blk_data *md)
2662 struct mmc_card *card;
2666 * Flush remaining requests and free queues. It
2667 * is freeing the queue that stops new requests
2668 * from being accepted.
2670 card = md->queue.card;
2671 mmc_cleanup_queue(&md->queue);
2672 if (md->flags & MMC_BLK_PACKED_CMD)
2673 mmc_packed_clean(&md->queue);
2674 if (md->disk->flags & GENHD_FL_UP) {
2675 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2676 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2677 card->ext_csd.boot_ro_lockable)
2678 device_remove_file(disk_to_dev(md->disk),
2679 &md->power_ro_lock);
2680 #ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
2681 device_remove_file(disk_to_dev(md->disk),
2682 &dev_attr_max_write_speed);
2683 device_remove_file(disk_to_dev(md->disk),
2684 &dev_attr_max_read_speed);
2685 device_remove_file(disk_to_dev(md->disk),
2686 &dev_attr_cache_size);
2689 del_gendisk(md->disk);
2695 static void mmc_blk_remove_parts(struct mmc_card *card,
2696 struct mmc_blk_data *md)
2698 struct list_head *pos, *q;
2699 struct mmc_blk_data *part_md;
2701 __clear_bit(md->name_idx, name_use);
2702 list_for_each_safe(pos, q, &md->part) {
2703 part_md = list_entry(pos, struct mmc_blk_data, part);
2705 mmc_blk_remove_req(part_md);
2709 static int mmc_add_disk(struct mmc_blk_data *md)
2712 struct mmc_card *card = md->queue.card;
2715 md->force_ro.show = force_ro_show;
2716 md->force_ro.store = force_ro_store;
2717 sysfs_attr_init(&md->force_ro.attr);
2718 md->force_ro.attr.name = "force_ro";
2719 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2720 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2723 #ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
2724 atomic_set(&md->queue.max_write_speed, max_write_speed);
2725 ret = device_create_file(disk_to_dev(md->disk),
2726 &dev_attr_max_write_speed);
2728 goto max_write_speed_fail;
2729 atomic_set(&md->queue.max_read_speed, max_read_speed);
2730 ret = device_create_file(disk_to_dev(md->disk),
2731 &dev_attr_max_read_speed);
2733 goto max_read_speed_fail;
2734 atomic_set(&md->queue.cache_size, cache_size);
2735 atomic_long_set(&md->queue.cache_used, 0);
2736 md->queue.cache_jiffies = jiffies;
2737 ret = device_create_file(disk_to_dev(md->disk), &dev_attr_cache_size);
2739 goto cache_size_fail;
2742 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2743 card->ext_csd.boot_ro_lockable) {
2746 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2749 mode = S_IRUGO | S_IWUSR;
2751 md->power_ro_lock.show = power_ro_lock_show;
2752 md->power_ro_lock.store = power_ro_lock_store;
2753 sysfs_attr_init(&md->power_ro_lock.attr);
2754 md->power_ro_lock.attr.mode = mode;
2755 md->power_ro_lock.attr.name =
2756 "ro_lock_until_next_power_on";
2757 ret = device_create_file(disk_to_dev(md->disk),
2758 &md->power_ro_lock);
2760 goto power_ro_lock_fail;
2765 #ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
2766 device_remove_file(disk_to_dev(md->disk), &dev_attr_cache_size);
2768 device_remove_file(disk_to_dev(md->disk), &dev_attr_max_read_speed);
2769 max_read_speed_fail:
2770 device_remove_file(disk_to_dev(md->disk), &dev_attr_max_write_speed);
2771 max_write_speed_fail:
2773 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2775 del_gendisk(md->disk);
2780 #define CID_MANFID_SANDISK 0x2
2781 #define CID_MANFID_TOSHIBA 0x11
2782 #define CID_MANFID_MICRON 0x13
2783 #define CID_MANFID_SAMSUNG 0x15
2784 #define CID_MANFID_KINGSTON 0x70
2786 static const struct mmc_fixup blk_fixups[] =
2788 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
2789 MMC_QUIRK_INAND_CMD38),
2790 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
2791 MMC_QUIRK_INAND_CMD38),
2792 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
2793 MMC_QUIRK_INAND_CMD38),
2794 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
2795 MMC_QUIRK_INAND_CMD38),
2796 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
2797 MMC_QUIRK_INAND_CMD38),
2800 * Some MMC cards experience performance degradation with CMD23
2801 * instead of CMD12-bounded multiblock transfers. For now we'll
2802 * blacklist what's bad...
2803 * - Certain Toshiba cards.
2805 * N.B. This doesn't affect SD cards.
2807 MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
2808 MMC_QUIRK_BLK_NO_CMD23),
2809 MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
2810 MMC_QUIRK_BLK_NO_CMD23),
2811 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2812 MMC_QUIRK_BLK_NO_CMD23),
2813 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2814 MMC_QUIRK_BLK_NO_CMD23),
2815 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2816 MMC_QUIRK_BLK_NO_CMD23),
2819 * Some MMC cards need a longer data read timeout than the CSD indicates.
2821 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
2822 MMC_QUIRK_LONG_READ_TIME),
2823 MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2824 MMC_QUIRK_LONG_READ_TIME),
2827 * On these Samsung MoviNAND parts, performing secure erase or
2828 * secure trim can result in unrecoverable corruption due to a firmware bug.
2831 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2832 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2833 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2834 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2835 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2836 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2837 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2838 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2839 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2840 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2841 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2842 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2843 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2844 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2845 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2846 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2849 * On some Kingston eMMCs, performing trim can occasionally result in
2850 * unrecoverable data corruption due to a firmware bug.
2852 MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
2853 MMC_QUIRK_TRIM_BROKEN),
2854 MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
2855 MMC_QUIRK_TRIM_BROKEN),
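/*
 * How the table above is applied (a summary of the generic quirk machinery,
 * not anything local to this file): mmc_fixup_device(), called from
 * mmc_blk_probe() below, walks the list and compares each entry's CID name,
 * manufacturer ID and OEM ID against the probed card; on a match it runs the
 * hook (add_quirk / add_quirk_mmc), which ORs the quirk bit into
 * card->quirks, where the request-handling paths earlier in this file test
 * for it.
 */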
2860 static int mmc_blk_probe(struct mmc_card *card)
2862 struct mmc_blk_data *md, *part_md;
2866 * Check that the card supports the command class(es) we need.
2868 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
2871 mmc_fixup_device(card, blk_fixups);
2873 md = mmc_blk_alloc(card);
2877 string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
2878 cap_str, sizeof(cap_str));
2879 pr_info("%s: %s %s %s %s\n",
2880 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
2881 cap_str, md->read_only ? "(ro)" : "");
2883 if (mmc_blk_alloc_parts(card, md))
2886 dev_set_drvdata(&card->dev, md);
2888 if (mmc_add_disk(md))
2891 list_for_each_entry(part_md, &md->part, part) {
2892 if (mmc_add_disk(part_md))
2896 pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2897 pm_runtime_use_autosuspend(&card->dev);
2900 * Don't enable runtime PM for SD-combo cards here. Leave that
2901 * decision to be taken during the SDIO init sequence instead.
2903 if (card->type != MMC_TYPE_SD_COMBO) {
2904 pm_runtime_set_active(&card->dev);
2905 pm_runtime_enable(&card->dev);
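/*
 * With the autosuspend delay set to 3000 ms above, the card is allowed to
 * runtime-suspend roughly three seconds after its last request completes;
 * for SD-combo cards the SDIO init path decides on its own whether runtime
 * PM is safe, so they are deliberately not enabled here.
 */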
2911 mmc_blk_remove_parts(card, md);
2912 mmc_blk_remove_req(md);
2916 static void mmc_blk_remove(struct mmc_card *card)
2918 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2920 mmc_blk_remove_parts(card, md);
2921 pm_runtime_get_sync(&card->dev);
2922 mmc_claim_host(card->host);
2923 mmc_blk_part_switch(card, md);
2924 mmc_release_host(card->host);
2925 if (card->type != MMC_TYPE_SD_COMBO)
2926 pm_runtime_disable(&card->dev);
2927 pm_runtime_put_noidle(&card->dev);
2928 mmc_blk_remove_req(md);
2929 dev_set_drvdata(&card->dev, NULL);
2932 static int _mmc_blk_suspend(struct mmc_card *card)
2934 struct mmc_blk_data *part_md;
2935 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2938 mmc_queue_suspend(&md->queue);
2939 list_for_each_entry(part_md, &md->part, part) {
2940 mmc_queue_suspend(&part_md->queue);
2946 static void mmc_blk_shutdown(struct mmc_card *card)
2948 _mmc_blk_suspend(card);
2951 #ifdef CONFIG_PM_SLEEP
2952 static int mmc_blk_suspend(struct device *dev)
2954 struct mmc_card *card = mmc_dev_to_card(dev);
2956 return _mmc_blk_suspend(card);
2959 static int mmc_blk_resume(struct device *dev)
2961 struct mmc_blk_data *part_md;
2962 struct mmc_blk_data *md = dev_get_drvdata(dev);
2966 * Resume involves the card going into idle state,
2967 * so the current partition is always the main one.
2969 md->part_curr = md->part_type;
2970 mmc_queue_resume(&md->queue);
2971 list_for_each_entry(part_md, &md->part, part) {
2972 mmc_queue_resume(&part_md->queue);
2979 static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
2981 static struct mmc_driver mmc_driver = {
2984 .pm = &mmc_blk_pm_ops,
2986 .probe = mmc_blk_probe,
2987 .remove = mmc_blk_remove,
2988 .shutdown = mmc_blk_shutdown,
2991 static int __init mmc_blk_init(void)
2995 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2996 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2998 max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
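/*
 * Rough numbers for the line above: with the kernel's MINORBITS of 20 and
 * the usual Kconfig default of 8 minors per device, (1 << 20) / 8 = 131072,
 * so the min() leaves max_devices capped at 256.
 */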
3000 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
3004 res = mmc_register_driver(&mmc_driver);
3010 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
3015 static void __exit mmc_blk_exit(void)
3017 mmc_unregister_driver(&mmc_driver);
3018 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
3021 module_init(mmc_blk_init);
3022 module_exit(mmc_blk_exit);
3024 MODULE_LICENSE("GPL");
3025 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");