/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>

/*
 * Our storage structure:
 * Subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))
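
/*
 * Note: the cast above is safe only because the mtd member is the first
 * field of struct mtd_concat, so the two pointers coincide. An equivalent,
 * more explicit spelling would be:
 *
 *	container_of(x, struct mtd_concat, mtd)
 */
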
/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
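
/*
 * For example (hypothetical layout): with two 8 MiB subdevices, a read at
 * concat offset 10 MiB falls past subdev[0], so the loops below subtract
 * 8 MiB and issue the read to subdev[1] at offset 2 MiB. A request that
 * crosses a subdevice boundary is split and continued on the next one.
 */
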
static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else {
				return err;
			}
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			/* Not destined for this subdev */
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - to;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_write(subdev, to, size, &retsize, buf);
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t * retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size;	/* store for future use */

		/* find the last vector entry touched by this chunk */
		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		err = mtd_writev(subdev, &vecs_copy[entry_low],
				 entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;
		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else {
				return err;
			}
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *) instr->priv);
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd_erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
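	/*
	 * Illustrative case (hypothetical geometry): if the concat joins a
	 * chip with 64 KiB erase blocks and one with 128 KiB erase blocks,
	 * an erase must start 64 KiB-aligned in the first region and end on
	 * a 128 KiB boundary in the second; the checks below enforce this.
	 */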
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;

		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
			      (erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}

		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_lock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}
	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_unlock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}
	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = mtd_suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_resume(subdev);
	}
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}
	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}
	return err;
}

/*
 * try to support NOMMU mmaps on concatenated devices
 * - we don't support subdev spanning as we can't guarantee it'll work
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
					      unsigned long len,
					      unsigned long offset,
					      unsigned long flags)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (offset >= subdev->size) {
			offset -= subdev->size;
			continue;
		}

		return mtd_get_unmapped_area(subdev, len, offset, flags);
	}

	return (unsigned long) -ENOSYS;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success (NULL on failure). This function does _not_
 * register the new device: that is the caller's responsibility.
 */
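
/*
 * A minimal usage sketch (hypothetical driver code; the subdevice
 * pointers and device name below are placeholders):
 *
 *	struct mtd_info *chips[2] = { chip0_mtd, chip1_mtd };
 *	struct mtd_info *merged;
 *
 *	merged = mtd_concat_create(chips, ARRAY_SIZE(chips), "flash-concat");
 *	if (merged)
 *		mtd_device_register(merged, NULL, 0);
 */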
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices      */
				   const char *name)	/* name for the new device   */
{
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->_writev)
		concat->mtd._writev = concat_writev;
	if (subdev[0]->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev[0]->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev[0]->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev[0]->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

		/* only permit direct mapping if the BDIs are all the same
		 * - copy-mapping is still permitted
		 */
		if (concat->mtd.backing_dev_info !=
		    subdev[i]->backing_dev_info)
			concat->mtd.backing_dev_info =
			    &default_backing_dev_info;

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
			subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize   !=  subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize     !=  subdev[i]->oobsize ||
		    !concat->mtd._read_oob  != !subdev[i]->_read_oob ||
		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._read = concat_read;
	concat->mtd._write = concat_write;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;
	concat->mtd._suspend = concat_suspend;
	concat->mtd._resume = concat_resume;
	concat->mtd._get_unmapped_area = concat_get_unmapped_area;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
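	/*
	 * For instance (hypothetical geometry), concatenating a uniform
	 * 64 KiB-block chip with a uniform 128 KiB-block chip gives one
	 * erase-size change, hence two erase regions on the super device.
	 */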
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position, tmp64;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, record any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */
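
/*
 * Only the concat wrapper and its erase-region list are freed; the
 * subdevices themselves are left untouched. Callers are expected to
 * remove the concatenated device from the MTD core first (e.g. via
 * mtd_device_unregister()).
 */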
void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");