/*
 * Swap block device support for MTDs
 * Turns an MTD device into a swap device with block wear leveling
 *
 * Copyright © 2007,2011 Nokia Corporation. All rights reserved.
 *
 * Authors: Jarkko Lavinen <jarkko.lavinen@nokia.com>
 *
 * Based on Richard Purdie's earlier implementation in 2007. Background
 * support and lock-less operation written by Adrian Hunter.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/genhd.h>
#include <linux/swap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/math64.h>

#define MTDSWAP_PREFIX "mtdswap"

/*
 * The number of free eraseblocks when GC should stop
 */
#define CLEAN_BLOCK_THRESHOLD	20

/*
 * Number of free eraseblocks below which GC can also collect low frag
 * blocks.
 */
#define LOW_FRAG_GC_THRESHOLD	5

/*
 * Wear level cost amortization. We want to do wear leveling in the
 * background without disturbing GC too much. This is done by defining
 * a maximum GC frequency: a frequency value of 6 means that 1/6 of the
 * GC passes will pick an erase block based on the biggest wear
 * difference rather than the biggest dirtiness.
 *
 * The lower freq2 should be chosen so that it makes sure the maximum
 * erase difference will decrease even if a malicious application is
 * deliberately trying to make erase differences large.
 */
#define MAX_ERASE_DIFF		4000
#define COLLECT_NONDIRTY_BASE	MAX_ERASE_DIFF
#define COLLECT_NONDIRTY_FREQ1	6
#define COLLECT_NONDIRTY_FREQ2	4

#define PAGE_UNDEF		UINT_MAX
#define BLOCK_UNDEF		UINT_MAX
#define BLOCK_ERROR		(UINT_MAX - 1)
#define BLOCK_MAX		(UINT_MAX - 2)

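/*
 * For illustration: d->page_data[] maps a swap page to one of these
 * values. Anything up to BLOCK_MAX is a real block number, while the
 * two sentinels mark unmapped and failed pages:
 *
 *	page_data[page] <= BLOCK_MAX	page lives in that block
 *	page_data[page] == BLOCK_UNDEF	page never written, or discarded
 *	page_data[page] == BLOCK_ERROR	page lost to an I/O error
 *
 * This is why mtdswap_readsect() below can catch both sentinels with a
 * single "realblock > BLOCK_MAX" comparison.
 */
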
#define EBLOCK_BAD		(1 << 0)
#define EBLOCK_NOMAGIC		(1 << 1)
#define EBLOCK_BITFLIP		(1 << 2)
#define EBLOCK_FAILED		(1 << 3)
#define EBLOCK_READERR		(1 << 4)
#define EBLOCK_IDX_SHIFT	5

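/*
 * The low five bits of swap_eb->flags hold the EBLOCK_* state bits.
 * During the initial scan, mtdswap_scan_eblks() temporarily parks the
 * chosen tree index in the bits above EBLOCK_IDX_SHIFT:
 *
 *	eb->flags |= (idx << EBLOCK_IDX_SHIFT);	stashed while scanning
 *	idx = eb->flags >> EBLOCK_IDX_SHIFT;	recovered afterwards
 */
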
struct swap_eb {
	struct rb_node rb;
	struct rb_root *root;

	unsigned int flags;
	unsigned int active_count;
	unsigned int erase_count;
	unsigned int pad;		/* speeds up pointer decrement */
};

#define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
					rb)->erase_count)
#define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \
					rb)->erase_count)

struct mtdswap_tree {
	struct rb_root root;
	unsigned int count;
};

enum {
	MTDSWAP_CLEAN,
	MTDSWAP_USED,
	MTDSWAP_LOWFRAG,
	MTDSWAP_HIFRAG,
	MTDSWAP_DIRTY,
	MTDSWAP_BITFLIP,
	MTDSWAP_FAILING,
	MTDSWAP_TREE_CNT,
};

struct mtdswap_dev {
	struct mtd_blktrans_dev *mbd_dev;
	struct mtd_info *mtd;
	struct device *dev;

	unsigned int *page_data;
	unsigned int *revmap;

	unsigned int eblks;
	unsigned int spare_eblks;
	unsigned int pages_per_eblk;
	unsigned int max_erase_count;
	struct swap_eb *eb_data;

	struct mtdswap_tree trees[MTDSWAP_TREE_CNT];

	unsigned long long sect_read_count;
	unsigned long long sect_write_count;
	unsigned long long mtd_write_count;
	unsigned long long mtd_read_count;
	unsigned long long discard_count;
	unsigned long long discard_page_count;

	unsigned int curr_write_pos;
	struct swap_eb *curr_write;

	char *page_buf;
	char *oob_buf;

	struct dentry *debugfs_root;
};

struct mtdswap_oobdata {
	__le16 magic;
	__le32 count;
} __attribute__((packed));

#define MTDSWAP_MAGIC_CLEAN	0x2095
#define MTDSWAP_MAGIC_DIRTY	(MTDSWAP_MAGIC_CLEAN + 1)
#define MTDSWAP_TYPE_CLEAN	0
#define MTDSWAP_TYPE_DIRTY	1
#define MTDSWAP_OOBSIZE		sizeof(struct mtdswap_oobdata)

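/*
 * Marker layout in the OOB area of an erase block (illustrative): a
 * clean marker (magic plus 32-bit erase count, MTDSWAP_OOBSIZE bytes)
 * goes into the OOB of the first page, and a shorter dirty marker
 * (magic only) into the OOB of the second page, so marking a block
 * dirty never rewrites the first page's OOB. See
 * mtdswap_write_marker() below.
 */
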
#define MTDSWAP_ERASE_RETRIES	3 /* Before marking erase block bad */
#define MTDSWAP_IO_RETRIES	3

#ifdef CONFIG_MTD_SWAP_STRICT
#define MTDSWAP_STRICT		1
#else
#define MTDSWAP_STRICT		0
#endif

enum {
	MTDSWAP_SCANNED_CLEAN,
	MTDSWAP_SCANNED_DIRTY,
	MTDSWAP_SCANNED_BITFLIP,
	MTDSWAP_SCANNED_BAD,
};

/*
 * In the worst case mtdswap_writesect() has allocated the last clean
 * page from the current block and is then pre-empted by the GC
 * thread. The thread can consume a full erase block when moving a
 * block.
 */
#define MIN_SPARE_EBLOCKS	2
#define MIN_ERASE_BLOCKS	(MIN_SPARE_EBLOCKS + 1)

#define TREE_ROOT(d, name) (&d->trees[MTDSWAP_ ## name].root)
#define TREE_EMPTY(d, name) (TREE_ROOT(d, name)->rb_node == NULL)
#define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name))
#define TREE_COUNT(d, name) (d->trees[MTDSWAP_ ## name].count)

#define MTDSWAP_MBD_TO_MTDSWAP(dev) ((struct mtdswap_dev *)dev->priv)

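/*
 * For illustration, the tree accessors above expand to plain array
 * lookups via token pasting, e.g. for the tree of clean erase blocks:
 *
 *	TREE_ROOT(d, CLEAN)	is	&d->trees[MTDSWAP_CLEAN].root
 *	TREE_COUNT(d, CLEAN)	is	d->trees[MTDSWAP_CLEAN].count
 */
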
static char partitions[128] = "";
module_param_string(partitions, partitions, sizeof(partitions), 0444);
MODULE_PARM_DESC(partitions, "MTD partition numbers to use as swap "
		"partitions=\"1,3,5\"");

static unsigned int spare_eblocks = 10;
module_param(spare_eblocks, uint, 0444);
MODULE_PARM_DESC(spare_eblocks, "Percentage of spare erase blocks for "
		"garbage collection (default 10%)");

static bool header; /* false */
module_param(header, bool, 0444);
MODULE_PARM_DESC(header,
		"Include builtin swap header (default 0, without header)");

static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background);

static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb)
{
	return (loff_t)(eb - d->eb_data) * d->mtd->erasesize;
}

static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb)
{
	unsigned int oldidx;
	struct mtdswap_tree *tp;

	if (eb->root) {
		tp = container_of(eb->root, struct mtdswap_tree, root);
		oldidx = tp - &d->trees[0];

		d->trees[oldidx].count--;
		rb_erase(&eb->rb, eb->root);
	}
}

static void __mtdswap_rb_add(struct rb_root *root, struct swap_eb *eb)
{
	struct rb_node **p, *parent = NULL;
	struct swap_eb *cur;

	p = &root->rb_node;
	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct swap_eb, rb);
		if (eb->erase_count > cur->erase_count)
			p = &(*p)->rb_right;
		else
			p = &(*p)->rb_left;
	}

	rb_link_node(&eb->rb, parent, p);
	rb_insert_color(&eb->rb, root);
}

static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx)
{
	struct rb_root *root;

	if (eb->root == &d->trees[idx].root)
		return;

	mtdswap_eb_detach(d, eb);
	root = &d->trees[idx].root;
	__mtdswap_rb_add(root, eb);
	eb->root = root;
	d->trees[idx].count++;
}

static struct rb_node *mtdswap_rb_index(struct rb_root *root, unsigned int idx)
{
	struct rb_node *p = rb_first(root);
	unsigned int i = 0;

	while (i < idx && p) {
		p = rb_next(p);
		i++;
	}

	return p;
}

static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
{
	int ret;
	loff_t offset;

	d->spare_eblks--;
	eb->flags |= EBLOCK_BAD;
	mtdswap_eb_detach(d, eb);
	eb->root = NULL;

	/* badblocks not supported */
	if (!d->mtd->block_markbad)
		return 1;

	offset = mtdswap_eb_offset(d, eb);
	dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
	ret = d->mtd->block_markbad(d->mtd, offset);

	if (ret) {
		dev_warn(d->dev, "Mark block bad failed for block at %08llx "
			"error %d\n", offset, ret);
		return ret;
	}

	return 1;
}

static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
{
	unsigned int marked = eb->flags & EBLOCK_FAILED;
	struct swap_eb *curr_write = d->curr_write;

	eb->flags |= EBLOCK_FAILED;
	if (curr_write == eb) {
		d->curr_write = NULL;

		if (!marked && d->curr_write_pos != 0) {
			mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
			return 0;
		}
	}

	return mtdswap_handle_badblock(d, eb);
}

static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
			struct mtd_oob_ops *ops)
{
	int ret = d->mtd->read_oob(d->mtd, from, ops);

	if (ret == -EUCLEAN)
		return ret;

	if (ret) {
		dev_warn(d->dev, "Read OOB failed %d for block at %08llx\n",
			ret, from);
		return ret;
	}

	if (ops->oobretlen < ops->ooblen) {
		dev_warn(d->dev, "Read OOB returned short read (%zd bytes not "
			"%zd) for block at %08llx\n",
			ops->oobretlen, ops->ooblen, from);
		return -EIO;
	}

	return 0;
}

static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
{
	struct mtdswap_oobdata *data, *data2;
	int ret;
	loff_t offset;
	struct mtd_oob_ops ops;

	offset = mtdswap_eb_offset(d, eb);

	/* Check first if the block is bad. */
	if (d->mtd->block_isbad && d->mtd->block_isbad(d->mtd, offset))
		return MTDSWAP_SCANNED_BAD;

	ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
	ops.oobbuf = d->oob_buf;
	ops.ooboffs = 0;
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_AUTO;

	ret = mtdswap_read_oob(d, offset, &ops);

	if (ret && ret != -EUCLEAN)
		return ret;

	data = (struct mtdswap_oobdata *)d->oob_buf;
	data2 = (struct mtdswap_oobdata *)
		(d->oob_buf + d->mtd->ecclayout->oobavail);

	if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
		eb->erase_count = le32_to_cpu(data->count);
		if (ret == -EUCLEAN)
			ret = MTDSWAP_SCANNED_BITFLIP;
		else {
			if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)
				ret = MTDSWAP_SCANNED_DIRTY;
			else
				ret = MTDSWAP_SCANNED_CLEAN;
		}
	} else {
		eb->flags |= EBLOCK_NOMAGIC;
		ret = MTDSWAP_SCANNED_DIRTY;
	}

	return ret;
}

static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
			u16 marker)
{
	struct mtdswap_oobdata n;
	int ret;
	loff_t offset;
	struct mtd_oob_ops ops;

	ops.ooboffs = 0;
	ops.oobbuf = (uint8_t *)&n;
	ops.mode = MTD_OOB_AUTO;
	ops.datbuf = NULL;

	if (marker == MTDSWAP_TYPE_CLEAN) {
		n.magic = cpu_to_le16(MTDSWAP_MAGIC_CLEAN);
		n.count = cpu_to_le32(eb->erase_count);
		ops.ooblen = MTDSWAP_OOBSIZE;
		offset = mtdswap_eb_offset(d, eb);
	} else {
		n.magic = cpu_to_le16(MTDSWAP_MAGIC_DIRTY);
		ops.ooblen = sizeof(n.magic);
		offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
	}

	ret = d->mtd->write_oob(d->mtd, offset, &ops);

	if (ret) {
		dev_warn(d->dev, "Write OOB failed for block at %08llx "
			"error %d\n", offset, ret);
		if (ret == -EIO || ret == -EBADMSG)
			mtdswap_handle_write_error(d, eb);
		return ret;
	}

	if (ops.oobretlen != ops.ooblen) {
		dev_warn(d->dev, "Short OOB write for block at %08llx: "
			"%zd not %zd\n",
			offset, ops.oobretlen, ops.ooblen);
		return -EIO;
	}

	return 0;
}

/*
 * Are there any erase blocks without MAGIC_CLEAN header, presumably
 * because power was cut off after erase but before header write? We
 * need to guesstimate the erase count.
 */
static void mtdswap_check_counts(struct mtdswap_dev *d)
{
	struct rb_root hist_root = RB_ROOT;
	struct rb_node *medrb;
	struct swap_eb *eb;
	unsigned int i, cnt, median;

	cnt = 0;
	for (i = 0; i < d->eblks; i++) {
		eb = d->eb_data + i;

		if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
			continue;

		__mtdswap_rb_add(&hist_root, eb);
		cnt++;
	}

	if (cnt == 0)
		return;

	medrb = mtdswap_rb_index(&hist_root, cnt / 2);
	median = rb_entry(medrb, struct swap_eb, rb)->erase_count;

	d->max_erase_count = MTDSWAP_ECNT_MAX(&hist_root);

	for (i = 0; i < d->eblks; i++) {
		eb = d->eb_data + i;

		if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR))
			eb->erase_count = median;

		if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
			continue;

		rb_erase(&eb->rb, &hist_root);
	}
}

static void mtdswap_scan_eblks(struct mtdswap_dev *d)
{
	int status;
	unsigned int i, idx;
	struct swap_eb *eb;

	for (i = 0; i < d->eblks; i++) {
		eb = d->eb_data + i;

		status = mtdswap_read_markers(d, eb);
		if (status < 0)
			eb->flags |= EBLOCK_READERR;
		else if (status == MTDSWAP_SCANNED_BAD) {
			eb->flags |= EBLOCK_BAD;
			continue;
		}

		switch (status) {
		case MTDSWAP_SCANNED_CLEAN:
			idx = MTDSWAP_CLEAN;
			break;
		case MTDSWAP_SCANNED_DIRTY:
		case MTDSWAP_SCANNED_BITFLIP:
			idx = MTDSWAP_DIRTY;
			break;
		default:
			idx = MTDSWAP_FAILING;
		}

		eb->flags |= (idx << EBLOCK_IDX_SHIFT);
	}

	mtdswap_check_counts(d);

	for (i = 0; i < d->eblks; i++) {
		eb = d->eb_data + i;

		if (eb->flags & EBLOCK_BAD)
			continue;

		idx = eb->flags >> EBLOCK_IDX_SHIFT;
		mtdswap_rb_add(d, eb, idx);
	}
}

/*
 * Place an eblk into the tree corresponding to the number of active
 * blocks it contains.
 */
static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb)
{
	unsigned int weight = eb->active_count;
	unsigned int maxweight = d->pages_per_eblk;

	if (eb == d->curr_write)
		return;

	if (eb->flags & EBLOCK_BITFLIP)
		mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
	else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED))
		mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
	else if (weight == maxweight)
		mtdswap_rb_add(d, eb, MTDSWAP_USED);
	else if (weight == 0)
		mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
	else if (weight > (maxweight/2))
		mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG);
	else
		mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG);
}

static void mtdswap_erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
	wake_up(wait_q);
}

static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb)
{
	struct mtd_info *mtd = d->mtd;
	struct erase_info erase;
	wait_queue_head_t wq;
	unsigned int retries = 0;
	int ret;

	eb->erase_count++;
	if (eb->erase_count > d->max_erase_count)
		d->max_erase_count = eb->erase_count;

retry:
	init_waitqueue_head(&wq);
	memset(&erase, 0, sizeof(struct erase_info));

	erase.mtd = mtd;
	erase.callback = mtdswap_erase_callback;
	erase.addr = mtdswap_eb_offset(d, eb);
	erase.len = mtd->erasesize;
	erase.priv = (u_long)&wq;

	ret = mtd->erase(mtd, &erase);
	if (ret) {
		if (retries++ < MTDSWAP_ERASE_RETRIES && !MTDSWAP_STRICT) {
			dev_warn(d->dev,
				"erase of erase block %#llx on %s failed",
				erase.addr, mtd->name);
			yield();
			goto retry;
		}

		dev_err(d->dev, "Cannot erase erase block %#llx on %s\n",
			erase.addr, mtd->name);

		mtdswap_handle_badblock(d, eb);
		return -EIO;
	}

	ret = wait_event_interruptible(wq, erase.state == MTD_ERASE_DONE ||
				erase.state == MTD_ERASE_FAILED);
	if (ret) {
		dev_err(d->dev, "Interrupted erase block %#llx erasure on %s\n",
			erase.addr, mtd->name);
		return -EINTR;
	}

	if (erase.state == MTD_ERASE_FAILED) {
		if (retries++ < MTDSWAP_ERASE_RETRIES) {
			dev_warn(d->dev,
				"erase of erase block %#llx on %s failed",
				erase.addr, mtd->name);
			yield();
			goto retry;
		}

		mtdswap_handle_badblock(d, eb);
		return -EIO;
	}

	return 0;
}

static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
			unsigned int *block)
{
	int ret;
	struct swap_eb *old_eb = d->curr_write;
	struct rb_root *clean_root;
	struct swap_eb *eb;

	if (old_eb == NULL || d->curr_write_pos >= d->pages_per_eblk) {
		do {
			if (TREE_EMPTY(d, CLEAN))
				return -ENOSPC;

			clean_root = TREE_ROOT(d, CLEAN);
			eb = rb_entry(rb_first(clean_root), struct swap_eb, rb);
			rb_erase(&eb->rb, clean_root);
			eb->root = NULL;
			TREE_COUNT(d, CLEAN)--;

			ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
		} while (ret == -EIO || ret == -EBADMSG);

		if (ret)
			return ret;

		d->curr_write_pos = 0;
		d->curr_write = eb;
		if (old_eb)
			mtdswap_store_eb(d, old_eb);
	}

	*block = (d->curr_write - d->eb_data) * d->pages_per_eblk +
		d->curr_write_pos;

	d->curr_write->active_count++;
	d->revmap[*block] = page;
	d->curr_write_pos++;

	return 0;
}

static unsigned int mtdswap_free_page_cnt(struct mtdswap_dev *d)
{
	return TREE_COUNT(d, CLEAN) * d->pages_per_eblk +
		d->pages_per_eblk - d->curr_write_pos;
}

static unsigned int mtdswap_enough_free_pages(struct mtdswap_dev *d)
{
	return mtdswap_free_page_cnt(d) > d->pages_per_eblk;
}

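/*
 * A worked example of the accounting above (illustrative numbers):
 * with 64 KiB erase blocks and 4 KiB pages, pages_per_eblk is 16.
 * Three clean blocks plus 10 free pages left in the current write
 * block give 3 * 16 + (16 - 6) = 58 free pages, and
 * mtdswap_enough_free_pages() demands strictly more than one block's
 * worth (> 16) so that GC always has room to migrate a full erase
 * block.
 */
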
static int mtdswap_write_block(struct mtdswap_dev *d, char *buf,
		unsigned int page, unsigned int *bp, int gc_context)
{
	struct mtd_info *mtd = d->mtd;
	struct swap_eb *eb;
	size_t retlen;
	loff_t writepos;
	int ret;

retry:
	if (!gc_context)
		while (!mtdswap_enough_free_pages(d))
			if (mtdswap_gc(d, 0) > 0)
				return -ENOSPC;

	ret = mtdswap_map_free_block(d, page, bp);
	eb = d->eb_data + (*bp / d->pages_per_eblk);

	if (ret == -EIO || ret == -EBADMSG) {
		d->curr_write = NULL;
		eb->active_count--;
		d->revmap[*bp] = PAGE_UNDEF;
		goto retry;
	}

	if (ret < 0)
		return ret;

	writepos = (loff_t)*bp << PAGE_SHIFT;
	ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf);
	if (ret == -EIO || ret == -EBADMSG) {
		d->curr_write_pos--;
		eb->active_count--;
		d->revmap[*bp] = PAGE_UNDEF;
		mtdswap_handle_write_error(d, eb);
		goto retry;
	}

	if (ret < 0) {
		dev_err(d->dev, "Write to MTD device failed: %d (%zd written)",
			ret, retlen);
		goto err;
	}

	if (retlen != PAGE_SIZE) {
		dev_err(d->dev, "Short write to MTD device: %zd written",
			retlen);
		ret = -EIO;
		goto err;
	}

	return ret;

err:
	d->curr_write_pos--;
	eb->active_count--;
	d->revmap[*bp] = PAGE_UNDEF;

	return ret;
}

static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
		unsigned int *newblock)
{
	struct mtd_info *mtd = d->mtd;
	struct swap_eb *eb, *oldeb;
	int ret;
	size_t retlen;
	unsigned int page, retries;
	loff_t readpos;

	page = d->revmap[oldblock];
	readpos = (loff_t) oldblock << PAGE_SHIFT;
	retries = 0;

retry:
	ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);

	if (ret < 0 && ret != -EUCLEAN) {
		oldeb = d->eb_data + oldblock / d->pages_per_eblk;
		oldeb->flags |= EBLOCK_READERR;

		dev_err(d->dev, "Read Error: %d (block %u)\n", ret,
			oldblock);
		retries++;
		if (retries < MTDSWAP_IO_RETRIES)
			goto retry;

		goto read_error;
	}

	if (retlen != PAGE_SIZE) {
		dev_err(d->dev, "Short read: %zd (block %u)\n", retlen,
			oldblock);
		ret = -EIO;
		goto read_error;
	}

	ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1);
	if (ret < 0) {
		d->page_data[page] = BLOCK_ERROR;
		dev_err(d->dev, "Write error: %d\n", ret);
		return ret;
	}

	eb = d->eb_data + *newblock / d->pages_per_eblk;
	d->page_data[page] = *newblock;
	d->revmap[oldblock] = PAGE_UNDEF;
	eb = d->eb_data + oldblock / d->pages_per_eblk;
	eb->active_count--;

	return 0;

read_error:
	d->page_data[page] = BLOCK_ERROR;
	d->revmap[oldblock] = PAGE_UNDEF;
	return ret;
}

static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb)
{
	unsigned int i, block, eblk_base, newblock;
	int ret, errcode;

	errcode = 0;
	eblk_base = (eb - d->eb_data) * d->pages_per_eblk;

	for (i = 0; i < d->pages_per_eblk; i++) {
		if (d->spare_eblks < MIN_SPARE_EBLOCKS)
			return -ENOSPC;

		block = eblk_base + i;
		if (d->revmap[block] == PAGE_UNDEF)
			continue;

		ret = mtdswap_move_block(d, block, &newblock);
		if (ret < 0 && !errcode)
			errcode = ret;
	}

	return errcode;
}

static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)
{
	int idx, stopat;

	if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD)
		stopat = MTDSWAP_LOWFRAG;
	else
		stopat = MTDSWAP_HIFRAG;

	for (idx = MTDSWAP_BITFLIP; idx >= stopat; idx--)
		if (d->trees[idx].root.rb_node != NULL)
			return idx;

	return -1;
}

static int mtdswap_wlfreq(unsigned int maxdiff)
{
	unsigned int h, x, y, dist, base;

	/*
	 * Calculate linear ramp down from f1 to f2 when maxdiff goes from
	 * MAX_ERASE_DIFF to MAX_ERASE_DIFF + COLLECT_NONDIRTY_BASE. Similar
	 * to a triangle with height f1 - f2 and width COLLECT_NONDIRTY_BASE.
	 */

	dist = maxdiff - MAX_ERASE_DIFF;
	if (dist > COLLECT_NONDIRTY_BASE)
		dist = COLLECT_NONDIRTY_BASE;

	/*
	 * Modelling the slope as a right-angled triangle with base
	 * COLLECT_NONDIRTY_BASE and height freq1 - freq2. The ratio y/x is
	 * equal to the ratio h/base.
	 */
	h = COLLECT_NONDIRTY_FREQ1 - COLLECT_NONDIRTY_FREQ2;
	base = COLLECT_NONDIRTY_BASE;

	x = base - dist;
	y = (x * h + base / 2) / base;

	return COLLECT_NONDIRTY_FREQ2 + y;
}

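/*
 * Worked example (illustrative): with the defaults above, a maximum
 * erase difference of 6000 gives dist = 2000, x = 2000 and
 * y = (2000 * 2 + 2000) / 4000 = 1, so mtdswap_wlfreq() returns 5 and
 * one GC pass in five does wear leveling. At maxdiff == 4000 the
 * result is COLLECT_NONDIRTY_FREQ1 (6); from 8000 upwards it has
 * ramped down to COLLECT_NONDIRTY_FREQ2 (4).
 */
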
static int mtdswap_choose_wl_tree(struct mtdswap_dev *d)
{
	static unsigned int pick_cnt;
	unsigned int i, idx = -1, wear, max;
	struct rb_root *root;

	max = 0;
	for (i = 0; i <= MTDSWAP_DIRTY; i++) {
		root = &d->trees[i].root;
		if (root->rb_node == NULL)
			continue;

		wear = d->max_erase_count - MTDSWAP_ECNT_MIN(root);
		if (wear > max) {
			max = wear;
			idx = i;
		}
	}

	if (max > MAX_ERASE_DIFF && pick_cnt >= mtdswap_wlfreq(max) - 1) {
		pick_cnt = 0;
		return idx;
	}

	pick_cnt++;
	return -1;
}

static int mtdswap_choose_gc_tree(struct mtdswap_dev *d,
			unsigned int background)
{
	int idx;

	if (TREE_NONEMPTY(d, FAILING) &&
		(background || (TREE_EMPTY(d, CLEAN) && TREE_EMPTY(d, DIRTY))))
		return MTDSWAP_FAILING;

	idx = mtdswap_choose_wl_tree(d);
	if (idx >= MTDSWAP_CLEAN)
		return idx;

	return __mtdswap_choose_gc_tree(d);
}

static struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d,
			unsigned int background)
{
	struct rb_root *rp = NULL;
	struct swap_eb *eb = NULL;
	int idx;

	if (background && TREE_COUNT(d, CLEAN) > CLEAN_BLOCK_THRESHOLD &&
			TREE_EMPTY(d, DIRTY) && TREE_EMPTY(d, FAILING))
		return NULL;

	idx = mtdswap_choose_gc_tree(d, background);
	if (idx < 0)
		return NULL;

	rp = &d->trees[idx].root;
	eb = rb_entry(rb_first(rp), struct swap_eb, rb);

	rb_erase(&eb->rb, rp);
	eb->root = NULL;
	d->trees[idx].count--;
	return eb;
}

static unsigned int mtdswap_test_patt(unsigned int i)
{
	return i % 2 ? 0x55555555 : 0xAAAAAAAA;
}

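/*
 * Consecutive MTD pages thus get complementary 0x55/0xAA checkerboards
 * (test + i alternates parity in mtdswap_eblk_passes() below), so over
 * the two test rounds every bit cell in the block is programmed both
 * ways before the block is trusted again.
 */
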
static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
					struct swap_eb *eb)
{
	struct mtd_info *mtd = d->mtd;
	unsigned int test, i, j, patt, mtd_pages;
	loff_t base, pos;
	unsigned int *p1 = (unsigned int *)d->page_buf;
	unsigned char *p2 = (unsigned char *)d->oob_buf;
	struct mtd_oob_ops ops;
	int ret;

	ops.mode = MTD_OOB_AUTO;
	ops.len = mtd->writesize;
	ops.ooblen = mtd->ecclayout->oobavail;
	ops.ooboffs = 0;
	ops.datbuf = d->page_buf;
	ops.oobbuf = d->oob_buf;
	base = mtdswap_eb_offset(d, eb);
	mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize;

	for (test = 0; test < 2; test++) {
		pos = base;
		for (i = 0; i < mtd_pages; i++) {
			patt = mtdswap_test_patt(test + i);
			memset(d->page_buf, patt, mtd->writesize);
			memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
			ret = mtd->write_oob(mtd, pos, &ops);
			if (ret)
				goto error;

			pos += mtd->writesize;
		}

		pos = base;
		for (i = 0; i < mtd_pages; i++) {
			ret = mtd->read_oob(mtd, pos, &ops);
			if (ret)
				goto error;

			patt = mtdswap_test_patt(test + i);
			for (j = 0; j < mtd->writesize/sizeof(int); j++)
				if (p1[j] != patt)
					goto error;

			for (j = 0; j < mtd->ecclayout->oobavail; j++)
				if (p2[j] != (unsigned char)patt)
					goto error;

			pos += mtd->writesize;
		}

		ret = mtdswap_erase_block(d, eb);
		if (ret)
			goto error;
	}

	eb->flags &= ~EBLOCK_READERR;
	return 1;

error:
	mtdswap_handle_badblock(d, eb);
	return 0;
}

static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
{
	struct swap_eb *eb;
	int ret;

	if (d->spare_eblks < MIN_SPARE_EBLOCKS)
		return 1;

	eb = mtdswap_pick_gc_eblk(d, background);
	if (!eb)
		return 1;

	ret = mtdswap_gc_eblock(d, eb);
	if (ret == -ENOSPC)
		return 1;

	if (eb->flags & EBLOCK_FAILED) {
		mtdswap_handle_badblock(d, eb);
		return 0;
	}

	eb->flags &= ~EBLOCK_BITFLIP;
	ret = mtdswap_erase_block(d, eb);
	if ((eb->flags & EBLOCK_READERR) &&
			(ret || !mtdswap_eblk_passes(d, eb)))
		return 0;

	if (ret == 0)
		ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN);

	if (ret == 0)
		mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
	else if (ret != -EIO && ret != -EBADMSG)
		mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);

	return 0;
}

static void mtdswap_background(struct mtd_blktrans_dev *dev)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
	int ret;

	while (1) {
		ret = mtdswap_gc(d, 1);
		if (ret || mtd_blktrans_cease_background(dev))
			return;
	}
}

static void mtdswap_cleanup(struct mtdswap_dev *d)
{
	vfree(d->eb_data);
	vfree(d->page_data);
	vfree(d->revmap);
	kfree(d->page_buf);
	kfree(d->oob_buf);
}

static int mtdswap_flush(struct mtd_blktrans_dev *dev)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

	if (d->mtd->sync)
		d->mtd->sync(d->mtd);
	return 0;
}

static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size)
{
	loff_t offset;
	unsigned int badcnt;

	badcnt = 0;

	if (mtd->block_isbad)
		for (offset = 0; offset < size; offset += mtd->erasesize)
			if (mtd->block_isbad(mtd, offset))
				badcnt++;

	return badcnt;
}

static int mtdswap_writesect(struct mtd_blktrans_dev *dev,
			unsigned long page, char *buf)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
	unsigned int newblock, mapped;
	struct swap_eb *eb;
	int ret;

	d->sect_write_count++;

	if (d->spare_eblks < MIN_SPARE_EBLOCKS)
		return -ENOSPC;

	if (header) {
		/* Ignore writes to the header page */
		if (unlikely(page == 0))
			return 0;

		page--;
	}

	mapped = d->page_data[page];
	if (mapped <= BLOCK_MAX) {
		eb = d->eb_data + (mapped / d->pages_per_eblk);
		eb->active_count--;
		mtdswap_store_eb(d, eb);
		d->page_data[page] = BLOCK_UNDEF;
		d->revmap[mapped] = PAGE_UNDEF;
	}

	ret = mtdswap_write_block(d, buf, page, &newblock, 0);
	d->mtd_write_count++;

	if (ret < 0)
		return ret;

	eb = d->eb_data + (newblock / d->pages_per_eblk);
	d->page_data[page] = newblock;

	return 0;
}

/* Provide a dummy swap header for the kernel */
static int mtdswap_auto_header(struct mtdswap_dev *d, char *buf)
{
	union swap_header *hd = (union swap_header *)(buf);

	memset(buf, 0, PAGE_SIZE - 10);

	hd->info.version = 1;
	hd->info.last_page = d->mbd_dev->size - 1;
	hd->info.nr_badpages = 0;

	memcpy(buf + PAGE_SIZE - 10, "SWAPSPACE2", 10);

	return 0;
}

static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
			unsigned long page, char *buf)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
	struct mtd_info *mtd = d->mtd;
	unsigned int realblock, retries;
	loff_t readpos;
	struct swap_eb *eb;
	size_t retlen;
	int ret;

	d->sect_read_count++;

	if (header) {
		if (unlikely(page == 0))
			return mtdswap_auto_header(d, buf);

		page--;
	}

	realblock = d->page_data[page];
	if (realblock > BLOCK_MAX) {
		memset(buf, 0x0, PAGE_SIZE);
		if (realblock == BLOCK_UNDEF)
			return 0;
		else
			return -EIO;
	}

	eb = d->eb_data + (realblock / d->pages_per_eblk);
	BUG_ON(d->revmap[realblock] == PAGE_UNDEF);

	readpos = (loff_t)realblock << PAGE_SHIFT;
	retries = 0;

retry:
	ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf);

	d->mtd_read_count++;
	if (ret == -EUCLEAN) {
		eb->flags |= EBLOCK_BITFLIP;
		mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
		ret = 0;
	}

	if (ret < 0) {
		dev_err(d->dev, "Read error %d\n", ret);
		eb->flags |= EBLOCK_READERR;
		mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
		retries++;
		if (retries < MTDSWAP_IO_RETRIES)
			goto retry;

		return ret;
	}

	if (retlen != PAGE_SIZE) {
		dev_err(d->dev, "Short read %zd\n", retlen);
		return -EIO;
	}

	return 0;
}

static int mtdswap_discard(struct mtd_blktrans_dev *dev, unsigned long first,
			unsigned nr_pages)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
	unsigned long page;
	struct swap_eb *eb;
	unsigned int mapped;

	d->discard_count++;

	for (page = first; page < first + nr_pages; page++) {
		mapped = d->page_data[page];
		if (mapped <= BLOCK_MAX) {
			eb = d->eb_data + (mapped / d->pages_per_eblk);
			eb->active_count--;
			mtdswap_store_eb(d, eb);
			d->page_data[page] = BLOCK_UNDEF;
			d->revmap[mapped] = PAGE_UNDEF;
			d->discard_page_count++;
		} else if (mapped == BLOCK_ERROR) {
			d->page_data[page] = BLOCK_UNDEF;
			d->discard_page_count++;
		}
	}

	return 0;
}

static int mtdswap_show(struct seq_file *s, void *data)
{
	struct mtdswap_dev *d = (struct mtdswap_dev *) s->private;
	unsigned long sum;
	unsigned int count[MTDSWAP_TREE_CNT];
	unsigned int min[MTDSWAP_TREE_CNT];
	unsigned int max[MTDSWAP_TREE_CNT];
	unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
	uint64_t use_size;
	char *name[] = {"clean", "used", "low", "high", "dirty", "bitflip",
			"failing"};

	mutex_lock(&d->mbd_dev->lock);

	for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
		struct rb_root *root = &d->trees[i].root;

		if (root->rb_node) {
			count[i] = d->trees[i].count;
			min[i] = rb_entry(rb_first(root), struct swap_eb,
					rb)->erase_count;
			max[i] = rb_entry(rb_last(root), struct swap_eb,
					rb)->erase_count;
		} else
			count[i] = 0;
	}

	if (d->curr_write) {
		cw = 1;
		cwp = d->curr_write_pos;
		cwecount = d->curr_write->erase_count;
	}

	sum = 0;
	for (i = 0; i < d->eblks; i++)
		sum += d->eb_data[i].erase_count;

	use_size = (uint64_t)d->eblks * d->mtd->erasesize;
	bb_cnt = mtdswap_badblocks(d->mtd, use_size);

	mapped = 0;
	pages = d->mbd_dev->size;
	for (i = 0; i < pages; i++)
		if (d->page_data[i] != BLOCK_UNDEF)
			mapped++;

	mutex_unlock(&d->mbd_dev->lock);

	for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
		if (!count[i])
			continue;

		if (min[i] != max[i])
			seq_printf(s, "%s:\t%5d erase blocks, erased min %d, "
				"max %d times\n",
				name[i], count[i], min[i], max[i]);
		else
			seq_printf(s, "%s:\t%5d erase blocks, all erased %d "
				"times\n", name[i], count[i], min[i]);
	}

	if (bb_cnt)
		seq_printf(s, "bad:\t%5u erase blocks\n", bb_cnt);

	if (cw)
		seq_printf(s, "current erase block: %u pages used, %u free, "
			"erased %u times\n",
			cwp, d->pages_per_eblk - cwp, cwecount);

	seq_printf(s, "total erasures: %lu\n", sum);

	seq_printf(s, "\n");

	seq_printf(s, "mtdswap_readsect count: %llu\n", d->sect_read_count);
	seq_printf(s, "mtdswap_writesect count: %llu\n", d->sect_write_count);
	seq_printf(s, "mtdswap_discard count: %llu\n", d->discard_count);
	seq_printf(s, "mtd read count: %llu\n", d->mtd_read_count);
	seq_printf(s, "mtd write count: %llu\n", d->mtd_write_count);
	seq_printf(s, "discarded pages count: %llu\n", d->discard_page_count);

	seq_printf(s, "\n");
	seq_printf(s, "total pages: %u\n", pages);
	seq_printf(s, "pages mapped: %u\n", mapped);

	return 0;
}

static int mtdswap_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtdswap_show, inode->i_private);
}

static const struct file_operations mtdswap_fops = {
	.open		= mtdswap_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int mtdswap_add_debugfs(struct mtdswap_dev *d)
{
	struct gendisk *gd = d->mbd_dev->disk;
	struct device *dev = disk_to_dev(gd);

	struct dentry *root;
	struct dentry *dent;

	root = debugfs_create_dir(gd->disk_name, NULL);
	if (IS_ERR(root))
		return 0;

	if (!root) {
		dev_err(dev, "failed to initialize debugfs\n");
		return -1;
	}

	d->debugfs_root = root;

	dent = debugfs_create_file("stats", S_IRUSR, root, d,
				&mtdswap_fops);
	if (!dent) {
		dev_err(d->dev, "debugfs_create_file failed\n");
		debugfs_remove_recursive(root);
		d->debugfs_root = NULL;
		return -1;
	}

	return 0;
}

static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
			unsigned int spare_cnt)
{
	struct mtd_info *mtd = d->mbd_dev->mtd;
	unsigned int i, eblk_bytes, pages, blocks;
	int ret = -ENOMEM;

	d->mtd = mtd;
	d->eblks = eblocks;
	d->spare_eblks = spare_cnt;
	d->pages_per_eblk = mtd->erasesize >> PAGE_SHIFT;

	pages = d->mbd_dev->size;
	blocks = eblocks * d->pages_per_eblk;

	for (i = 0; i < MTDSWAP_TREE_CNT; i++)
		d->trees[i].root = RB_ROOT;

	d->page_data = vmalloc(sizeof(int)*pages);
	if (!d->page_data)
		goto page_data_fail;

	d->revmap = vmalloc(sizeof(int)*blocks);
	if (!d->revmap)
		goto revmap_fail;

	eblk_bytes = sizeof(struct swap_eb)*d->eblks;
	d->eb_data = vmalloc(eblk_bytes);
	if (!d->eb_data)
		goto eb_data_fail;

	memset(d->eb_data, 0, eblk_bytes);
	for (i = 0; i < pages; i++)
		d->page_data[i] = BLOCK_UNDEF;

	for (i = 0; i < blocks; i++)
		d->revmap[i] = PAGE_UNDEF;

	d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!d->page_buf)
		goto page_buf_fail;

	d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL);
	if (!d->oob_buf)
		goto oob_buf_fail;

	mtdswap_scan_eblks(d);

	return 0;

oob_buf_fail:
	kfree(d->page_buf);
page_buf_fail:
	vfree(d->eb_data);
eb_data_fail:
	vfree(d->revmap);
revmap_fail:
	vfree(d->page_data);
page_data_fail:
	printk(KERN_ERR "%s: init failed (%d)\n", MTDSWAP_PREFIX, ret);
	return ret;
}

static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtdswap_dev *d;
	struct mtd_blktrans_dev *mbd_dev;
	char *parts;
	char *this_opt;
	unsigned long part;
	unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
	uint64_t swap_size, use_size, size_limit;
	struct nand_ecclayout *oinfo;
	int ret;

	parts = &partitions[0];
	if (!*parts)
		return;

	while ((this_opt = strsep(&parts, ",")) != NULL) {
		if (strict_strtoul(this_opt, 0, &part) < 0)
			return;

		if (mtd->index == part)
			break;
	}

	if (mtd->index != part)
		return;

	if (mtd->erasesize < PAGE_SIZE || mtd->erasesize % PAGE_SIZE) {
		printk(KERN_ERR "%s: Erase size %u not multiple of PAGE_SIZE "
			"%lu\n", MTDSWAP_PREFIX, mtd->erasesize, PAGE_SIZE);
		return;
	}

	if (PAGE_SIZE % mtd->writesize || mtd->writesize > PAGE_SIZE) {
		printk(KERN_ERR "%s: PAGE_SIZE %lu not multiple of write size"
			" %u\n", MTDSWAP_PREFIX, PAGE_SIZE, mtd->writesize);
		return;
	}

	oinfo = mtd->ecclayout;
	if (!mtd->oobsize || !oinfo || oinfo->oobavail < MTDSWAP_OOBSIZE) {
		printk(KERN_ERR "%s: Not enough free bytes in OOB, "
			"%d available, %zu needed.\n",
			MTDSWAP_PREFIX, oinfo ? oinfo->oobavail : 0,
			MTDSWAP_OOBSIZE);
		return;
	}

	if (spare_eblocks > 100)
		spare_eblocks = 100;

	use_size = mtd->size;
	size_limit = (uint64_t) BLOCK_MAX * PAGE_SIZE;

	if (mtd->size > size_limit) {
		printk(KERN_WARNING "%s: Device too large. Limiting size to "
			"%llu bytes\n", MTDSWAP_PREFIX, size_limit);
		use_size = size_limit;
	}

	eblocks = mtd_div_by_eb(use_size, mtd);
	use_size = eblocks * mtd->erasesize;
	bad_blocks = mtdswap_badblocks(mtd, use_size);
	eavailable = eblocks - bad_blocks;

	if (eavailable < MIN_ERASE_BLOCKS) {
		printk(KERN_ERR "%s: Not enough erase blocks. %u available, "
			"%d needed\n", MTDSWAP_PREFIX, eavailable,
			MIN_ERASE_BLOCKS);
		return;
	}

	spare_cnt = div_u64((uint64_t)eavailable * spare_eblocks, 100);

	if (spare_cnt < MIN_SPARE_EBLOCKS)
		spare_cnt = MIN_SPARE_EBLOCKS;

	if (spare_cnt > eavailable - 1)
		spare_cnt = eavailable - 1;

	swap_size = (uint64_t)(eavailable - spare_cnt) * mtd->erasesize +
		(header ? PAGE_SIZE : 0);

	printk(KERN_INFO "%s: Enabling MTD swap on device %lu, size %llu KB, "
		"%u spare, %u bad blocks\n",
		MTDSWAP_PREFIX, part, swap_size / 1024, spare_cnt, bad_blocks);

	d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL);
	if (!d)
		return;

	mbd_dev = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
	if (!mbd_dev) {
		kfree(d);
		return;
	}

	d->mbd_dev = mbd_dev;
	mbd_dev->priv = d;

	mbd_dev->mtd = mtd;
	mbd_dev->devnum = mtd->index;
	mbd_dev->size = swap_size >> PAGE_SHIFT;
	mbd_dev->tr = tr;

	if (!(mtd->flags & MTD_WRITEABLE))
		mbd_dev->readonly = 1;

	if (mtdswap_init(d, eblocks, spare_cnt) < 0)
		goto init_failed;

	if (add_mtd_blktrans_dev(mbd_dev) < 0)
		goto cleanup;

	d->dev = disk_to_dev(mbd_dev->disk);

	ret = mtdswap_add_debugfs(d);
	if (ret < 0)
		goto debugfs_failed;

	return;

debugfs_failed:
	del_mtd_blktrans_dev(mbd_dev);

cleanup:
	mtdswap_cleanup(d);

init_failed:
	kfree(mbd_dev);
	kfree(d);
}

static void mtdswap_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

	debugfs_remove_recursive(d->debugfs_root);
	del_mtd_blktrans_dev(dev);
	mtdswap_cleanup(d);
	kfree(dev);
}

static struct mtd_blktrans_ops mtdswap_ops = {
	.name		= "mtdswap",
	.major		= 0,
	.part_bits	= 0,
	.blksize	= PAGE_SIZE,
	.flush		= mtdswap_flush,
	.readsect	= mtdswap_readsect,
	.writesect	= mtdswap_writesect,
	.discard	= mtdswap_discard,
	.background	= mtdswap_background,
	.add_mtd	= mtdswap_add_mtd,
	.remove_dev	= mtdswap_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init mtdswap_modinit(void)
{
	return register_mtd_blktrans(&mtdswap_ops);
}

static void __exit mtdswap_modexit(void)
{
	deregister_mtd_blktrans(&mtdswap_ops);
}

module_init(mtdswap_modinit);
module_exit(mtdswap_modexit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Block device access to an MTD suitable for using as "
		"swap space");