2 * Copyright (c) 2012 Linutronix GmbH
3 * Author: Richard Weinberger <richard@nod.at>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
16 #include <linux/crc32.h>
20 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
21 * @ubi: UBI device description object
23 size_t ubi_calc_fm_size(struct ubi_device *ubi)
27 size = sizeof(struct ubi_fm_sb) +
28 sizeof(struct ubi_fm_hdr) +
29 sizeof(struct ubi_fm_scan_pool) +
30 sizeof(struct ubi_fm_scan_pool) +
31 (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
32 (sizeof(struct ubi_fm_eba) +
33 (ubi->peb_count * sizeof(__be32))) +
34 sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
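/* A fastmap image holds the super block, the header, the two pools,
 * one ubi_fm_ec entry per PEB, the EBA table (its header plus one
 * __be32 per PEB) and a volume header for every possible volume;
 * it always occupies whole LEBs, hence the roundup below.
 */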
35 return roundup(size, ubi->leb_size);
40 * new_fm_vhdr - allocate a new volume header for fastmap usage.
41 * @ubi: UBI device description object
42 * @vol_id: the VID of the new header
44 * Returns a new struct ubi_vid_hdr on success.
45 * NULL indicates out of memory.
47 static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
49 struct ubi_vid_hdr *new;
51 new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
55 new->vol_type = UBI_VID_DYNAMIC;
56 new->vol_id = cpu_to_be32(vol_id);
58 /* UBI implementations without fastmap support have to delete the
61 new->compat = UBI_COMPAT_DELETE;
68 * add_aeb - create and add an attach erase block to a given list.
69 * @ai: UBI attach info object
70 * @list: the target list
71 * @pnum: PEB number of the new attach erase block
72 * @ec: erase counter of the new PEB
73 * @scrub: scrub this PEB after attaching
75 * Returns 0 on success, < 0 indicates an internal error.
77 static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
78 int pnum, int ec, int scrub)
80 struct ubi_ainf_peb *aeb;
82 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
90 aeb->copy_flag = aeb->sqnum = 0;
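/* Keep the attach-wide erase counter statistics up to date; the mean
 * EC is later derived from ec_sum and ec_count in ubi_attach_fastmap().
 */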
92 ai->ec_sum += aeb->ec;
95 if (ai->max_ec < aeb->ec)
98 if (ai->min_ec > aeb->ec)
101 list_add_tail(&aeb->u.list, list);
107 * add_vol - create and add a new volume to ubi_attach_info.
108 * @ai: ubi_attach_info object
109 * @vol_id: VID of the new volume
110 * @used_ebs: number of used EBs
111 * @data_pad: data padding value of the new volume
112 * @vol_type: volume type
113 * @last_eb_bytes: number of bytes in the last LEB
115 * Returns the new struct ubi_ainf_volume on success.
116 * NULL indicates an error.
118 static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
119 int used_ebs, int data_pad, u8 vol_type,
122 struct ubi_ainf_volume *av;
123 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
127 av = rb_entry(parent, struct ubi_ainf_volume, rb);
129 if (vol_id > av->vol_id)
135 av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
139 av->highest_lnum = av->leb_count = 0;
141 av->used_ebs = used_ebs;
142 av->data_pad = data_pad;
143 av->last_data_size = last_eb_bytes;
145 av->vol_type = vol_type;
148 dbg_bld("found volume (ID %i)", vol_id);
150 rb_link_node(&av->rb, parent, p);
151 rb_insert_color(&av->rb, &ai->volumes);
158 * assign_aeb_to_av - assigns an AEB to a given ainf_volume and removes it
159 * from its original list.
160 * @ai: ubi_attach_info object
161 * @aeb: the AEB to be assigned
162 * @av: target scan volume
164 static void assign_aeb_to_av(struct ubi_attach_info *ai,
165 struct ubi_ainf_peb *aeb,
166 struct ubi_ainf_volume *av)
168 struct ubi_ainf_peb *tmp_aeb;
169 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
171 p = &av->root.rb_node;
175 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
176 if (aeb->lnum != tmp_aeb->lnum) {
177 if (aeb->lnum < tmp_aeb->lnum)
187 list_del(&aeb->u.list);
190 rb_link_node(&aeb->u.rb, parent, p);
191 rb_insert_color(&aeb->u.rb, &av->root);
195 * update_vol - inserts or updates a LEB which was found in a pool.
196 * @ubi: the UBI device object
197 * @ai: attach info object
198 * @av: the volume this LEB belongs to
199 * @new_vh: the volume header derived from new_aeb
200 * @new_aeb: the AEB to be examined
202 * Returns 0 on success, < 0 indicates an internal error.
204 static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
205 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
206 struct ubi_ainf_peb *new_aeb)
208 struct rb_node **p = &av->root.rb_node, *parent = NULL;
209 struct ubi_ainf_peb *aeb, *victim;
214 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
216 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
217 if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
225 /* This case can happen if the fastmap gets written
226 * because of a volume change (creation, deletion, ..).
227 * Then a PEB can be within the persistent EBA and the pool.
229 if (aeb->pnum == new_aeb->pnum) {
230 ubi_assert(aeb->lnum == new_aeb->lnum);
231 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
236 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
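/* cmp_res is a bitmask: bit 0 set means the copy described by new_vh
 * and new_aeb is the more recent one; see ubi_compare_lebs() for the
 * remaining bits and the error case.
 */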
240 /* new_aeb is newer */
242 victim = kmem_cache_alloc(ai->aeb_slab_cache,
247 victim->ec = aeb->ec;
248 victim->pnum = aeb->pnum;
249 list_add_tail(&victim->u.list, &ai->erase);
251 if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
252 av->last_data_size =
253 be32_to_cpu(new_vh->data_size);
255 dbg_bld("vol %i: AEB %i's PEB %i is the newer",
256 av->vol_id, aeb->lnum, new_aeb->pnum);
258 aeb->ec = new_aeb->ec;
259 aeb->pnum = new_aeb->pnum;
260 aeb->copy_flag = new_vh->copy_flag;
261 aeb->scrub = new_aeb->scrub;
262 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
264 /* new_aeb is older */
266 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
267 av->vol_id, aeb->lnum, new_aeb->pnum);
268 list_add_tail(&new_aeb->u.list, &ai->erase);
273 /* This LEB is new, let's add it to the volume */
275 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
276 av->highest_lnum = be32_to_cpu(new_vh->lnum);
277 av->last_data_size = be32_to_cpu(new_vh->data_size);
280 if (av->vol_type == UBI_STATIC_VOLUME)
281 av->used_ebs = be32_to_cpu(new_vh->used_ebs);
285 rb_link_node(&new_aeb->u.rb, parent, p);
286 rb_insert_color(&new_aeb->u.rb, &av->root);
292 * process_pool_aeb - process a non-empty PEB found in a pool.
293 * @ubi: UBI device object
294 * @ai: attach info object
295 * @new_vh: the volume header derived from new_aeb
296 * @new_aeb: the AEB to be examined
298 * Returns 0 on success, < 0 indicates an internal error.
300 static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
301 struct ubi_vid_hdr *new_vh,
302 struct ubi_ainf_peb *new_aeb)
304 struct ubi_ainf_volume *av, *tmp_av = NULL;
305 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
308 if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
309 be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
310 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
315 /* Find the volume this SEB belongs to */
318 tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
320 if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
322 else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
333 ubi_err(ubi, "orphaned volume in fastmap pool!");
334 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
335 return UBI_BAD_FASTMAP;
338 ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
340 return update_vol(ubi, ai, av, new_vh, new_aeb);
344 * unmap_peb - unmap a PEB.
345 * If fastmap detects a free PEB in the pool it has to check whether
346 * this PEB has been unmapped after writing the fastmap.
348 * @ai: UBI attach info object
349 * @pnum: The PEB to be unmapped
351 static void unmap_peb(struct ubi_attach_info *ai, int pnum)
353 struct ubi_ainf_volume *av;
354 struct rb_node *node, *node2;
355 struct ubi_ainf_peb *aeb;
357 for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
358 av = rb_entry(node, struct ubi_ainf_volume, rb);
360 for (node2 = rb_first(&av->root); node2;
361 node2 = rb_next(node2)) {
362 aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
363 if (aeb->pnum == pnum) {
364 rb_erase(&aeb->u.rb, &av->root);
365 kmem_cache_free(ai->aeb_slab_cache, aeb);
373 * scan_pool - scans a pool for changed (no longer empty) PEBs.
374 * @ubi: UBI device object
375 * @ai: attach info object
376 * @pebs: an array of all PEB numbers in the pool to be scanned
377 * @pool_size: size of the pool (number of entries in @pebs)
378 * @max_sqnum: pointer to the maximal sequence number
379 * @eba_orphans: list of PEBs which need to be scanned
380 * @free: list of PEBs which are most likely free (and go into @ai->free)
382 * Returns 0 on success; UBI_BAD_FASTMAP is returned if the pool is unusable.
383 * < 0 indicates an internal error.
385 static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
386 int *pebs, int pool_size, unsigned long long *max_sqnum,
387 struct list_head *eba_orphans, struct list_head *free)
389 struct ubi_vid_hdr *vh;
390 struct ubi_ec_hdr *ech;
391 struct ubi_ainf_peb *new_aeb, *tmp_aeb;
392 int i, pnum, err, found_orphan, ret = 0;
394 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
398 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
404 dbg_bld("scanning fastmap pool: size = %i", pool_size);
407 * Now scan all PEBs in the pool to find changes which have been made
408 * after the creation of the fastmap
410 for (i = 0; i < pool_size; i++) {
414 pnum = be32_to_cpu(pebs[i]);
416 if (ubi_io_is_bad(ubi, pnum)) {
417 ubi_err(ubi, "bad PEB in fastmap pool!");
418 ret = UBI_BAD_FASTMAP;
422 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
423 if (err && err != UBI_IO_BITFLIPS) {
424 ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
426 ret = err > 0 ? UBI_BAD_FASTMAP : err;
428 } else if (err == UBI_IO_BITFLIPS)
432 * Older UBI implementations have image_seq set to zero, so
433 * we shouldn't fail if image_seq == 0.
435 image_seq = be32_to_cpu(ech->image_seq);
437 if (image_seq && (image_seq != ubi->image_seq)) {
438 ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
439 be32_to_cpu(ech->image_seq), ubi->image_seq);
440 ret = UBI_BAD_FASTMAP;
444 err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
445 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
446 unsigned long long ec = be64_to_cpu(ech->ec);
448 dbg_bld("Adding PEB to free: %i", pnum);
449 if (err == UBI_IO_FF_BITFLIPS)
450 add_aeb(ai, free, pnum, ec, 1);
452 add_aeb(ai, free, pnum, ec, 0);
454 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
455 dbg_bld("Found non empty PEB:%i in pool", pnum);
457 if (err == UBI_IO_BITFLIPS)
461 list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
462 if (tmp_aeb->pnum == pnum) {
468 list_del(&tmp_aeb->u.list);
469 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
472 new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
479 new_aeb->ec = be64_to_cpu(ech->ec);
480 new_aeb->pnum = pnum;
481 new_aeb->lnum = be32_to_cpu(vh->lnum);
482 new_aeb->sqnum = be64_to_cpu(vh->sqnum);
483 new_aeb->copy_flag = vh->copy_flag;
484 new_aeb->scrub = scrub;
486 if (*max_sqnum < new_aeb->sqnum)
487 *max_sqnum = new_aeb->sqnum;
489 err = process_pool_aeb(ubi, ai, vh, new_aeb);
491 ret = err > 0 ? UBI_BAD_FASTMAP : err;
495 /* We are paranoid and fall back to scanning mode */
496 ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!");
497 ret = err > 0 ? UBI_BAD_FASTMAP : err;
504 ubi_free_vid_hdr(ubi, vh);
510 * count_fastmap_pebs - Counts the PEBs found by fastmap.
511 * @ai: The UBI attach info object
513 static int count_fastmap_pebs(struct ubi_attach_info *ai)
515 struct ubi_ainf_peb *aeb;
516 struct ubi_ainf_volume *av;
517 struct rb_node *rb1, *rb2;
520 list_for_each_entry(aeb, &ai->erase, u.list)
523 list_for_each_entry(aeb, &ai->free, u.list)
526 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
527 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
534 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
535 * @ubi: UBI device object
536 * @ai: UBI attach info object
537 * @fm: the fastmap to be attached
539 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
540 * < 0 indicates an internal error.
542 static int ubi_attach_fastmap(struct ubi_device *ubi,
543 struct ubi_attach_info *ai,
544 struct ubi_fastmap_layout *fm)
546 struct list_head used, eba_orphans, free;
547 struct ubi_ainf_volume *av;
548 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
549 struct ubi_ec_hdr *ech;
550 struct ubi_fm_sb *fmsb;
551 struct ubi_fm_hdr *fmhdr;
552 struct ubi_fm_scan_pool *fmpl1, *fmpl2;
553 struct ubi_fm_ec *fmec;
554 struct ubi_fm_volhdr *fmvhdr;
555 struct ubi_fm_eba *fm_eba;
556 int ret, i, j, pool_size, wl_pool_size;
557 size_t fm_pos = 0, fm_size = ubi->fm_size;
558 unsigned long long max_sqnum = 0;
559 void *fm_raw = ubi->fm_buf;
561 INIT_LIST_HEAD(&used);
562 INIT_LIST_HEAD(&free);
563 INIT_LIST_HEAD(&eba_orphans);
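/* Seed min_ec with the largest possible erase counter so that
 * add_aeb() can only lower it while PEBs are added.
 */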
564 ai->min_ec = UBI_MAX_ERASECOUNTER;
566 fmsb = (struct ubi_fm_sb *)(fm_raw);
567 ai->max_sqnum = fmsb->sqnum;
568 fm_pos += sizeof(struct ubi_fm_sb);
569 if (fm_pos >= fm_size)
572 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
573 fm_pos += sizeof(*fmhdr);
574 if (fm_pos >= fm_size)
577 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
578 ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
579 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
583 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
584 fm_pos += sizeof(*fmpl1);
585 if (fm_pos >= fm_size)
587 if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
588 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
589 be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
593 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
594 fm_pos += sizeof(*fmpl2);
595 if (fm_pos >= fm_size)
597 if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
598 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
599 be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
603 pool_size = be16_to_cpu(fmpl1->size);
604 wl_pool_size = be16_to_cpu(fmpl2->size);
605 fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
606 fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
608 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
609 ubi_err(ubi, "bad pool size: %i", pool_size);
613 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
614 ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
619 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
620 fm->max_pool_size < 0) {
621 ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
625 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
626 fm->max_wl_pool_size < 0) {
627 ubi_err(ubi, "bad maximal WL pool size: %i",
628 fm->max_wl_pool_size);
632 /* read EC values from free list */
633 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
634 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
635 fm_pos += sizeof(*fmec);
636 if (fm_pos >= fm_size)
639 add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
640 be32_to_cpu(fmec->ec), 0);
643 /* read EC values from used list */
644 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
645 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
646 fm_pos += sizeof(*fmec);
647 if (fm_pos >= fm_size)
650 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
651 be32_to_cpu(fmec->ec), 0);
654 /* read EC values from scrub list */
655 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
656 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
657 fm_pos += sizeof(*fmec);
658 if (fm_pos >= fm_size)
661 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
662 be32_to_cpu(fmec->ec), 1);
665 /* read EC values from erase list */
666 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
667 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
668 fm_pos += sizeof(*fmec);
669 if (fm_pos >= fm_size)
672 add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
673 be32_to_cpu(fmec->ec), 1);
676 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
677 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
679 /* Iterate over all volumes and read their EBA table */
680 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
681 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
682 fm_pos += sizeof(*fmvhdr);
683 if (fm_pos >= fm_size)
686 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
687 ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
688 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
692 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
693 be32_to_cpu(fmvhdr->used_ebs),
694 be32_to_cpu(fmvhdr->data_pad),
696 be32_to_cpu(fmvhdr->last_eb_bytes));
702 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
703 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
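/* Each volume header is followed by a variable-sized EBA table:
 * one __be32 PEB number for every reserved LEB of the volume.
 */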
705 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
706 fm_pos += sizeof(*fm_eba);
707 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
708 if (fm_pos >= fm_size)
711 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
712 ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
713 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
717 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
718 int pnum = be32_to_cpu(fm_eba->pnum[j]);
720 if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
724 list_for_each_entry(tmp_aeb, &used, u.list) {
725 if (tmp_aeb->pnum == pnum) {
731 /* This can happen if a PEB is already in an EBA known
732 * by this fastmap but the PEB itself is not in the used
734 * In this case the PEB can be within the fastmap pool
735 * or while writing the fastmap it was in the protection
739 aeb = kmem_cache_alloc(ai->aeb_slab_cache,
748 aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
750 aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
751 list_add_tail(&aeb->u.list, &eba_orphans);
757 if (av->highest_lnum <= aeb->lnum)
758 av->highest_lnum = aeb->lnum;
760 assign_aeb_to_av(ai, aeb, av);
762 dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
763 aeb->pnum, aeb->lnum, av->vol_id);
766 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
772 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
776 if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
777 ubi_err(ubi, "bad PEB in fastmap EBA orphan list");
778 ret = UBI_BAD_FASTMAP;
783 err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
784 if (err && err != UBI_IO_BITFLIPS) {
785 ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
787 ret = err > 0 ? UBI_BAD_FASTMAP : err;
791 } else if (err == UBI_IO_BITFLIPS)
794 tmp_aeb->ec = be64_to_cpu(ech->ec);
795 assign_aeb_to_av(ai, tmp_aeb, av);
801 ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
802 &eba_orphans, &free);
806 ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
807 &eba_orphans, &free);
811 if (max_sqnum > ai->max_sqnum)
812 ai->max_sqnum = max_sqnum;
814 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
815 list_move_tail(&tmp_aeb->u.list, &ai->free);
817 ubi_assert(list_empty(&used));
818 ubi_assert(list_empty(&eba_orphans));
819 ubi_assert(list_empty(&free));
822 * If fastmap is leaking PEBs (must not happen), raise a
823 * fat warning and fall back to scanning mode.
824 * We do this here because in ubi_wl_init() it's too late
825 * and we cannot fall back to scanning.
827 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
828 ai->bad_peb_count - fm->used_blocks))
834 ret = UBI_BAD_FASTMAP;
836 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
837 list_del(&tmp_aeb->u.list);
838 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
840 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
841 list_del(&tmp_aeb->u.list);
842 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
844 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
845 list_del(&tmp_aeb->u.list);
846 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
853 * ubi_scan_fastmap - scan the fastmap.
854 * @ubi: UBI device object
855 * @ai: UBI attach info to be filled
856 * @fm_anchor: The fastmap starts at this PEB
858 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
859 * UBI_BAD_FASTMAP if one was found but is not usable.
860 * < 0 indicates an internal error.
862 int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
865 struct ubi_fm_sb *fmsb, *fmsb2;
866 struct ubi_vid_hdr *vh;
867 struct ubi_ec_hdr *ech;
868 struct ubi_fastmap_layout *fm;
869 int i, used_blocks, pnum, ret = 0;
872 unsigned long long sqnum = 0;
874 mutex_lock(&ubi->fm_mutex);
875 memset(ubi->fm_buf, 0, ubi->fm_size);
877 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
883 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
890 ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
891 if (ret && ret != UBI_IO_BITFLIPS)
893 else if (ret == UBI_IO_BITFLIPS)
894 fm->to_be_tortured[0] = 1;
896 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
897 ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
898 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
899 ret = UBI_BAD_FASTMAP;
903 if (fmsb->version != UBI_FM_FMT_VERSION) {
904 ubi_err(ubi, "bad fastmap version: %i, expected: %i",
905 fmsb->version, UBI_FM_FMT_VERSION);
906 ret = UBI_BAD_FASTMAP;
910 used_blocks = be32_to_cpu(fmsb->used_blocks);
911 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
912 ubi_err(ubi, "number of fastmap blocks is invalid: %i",
914 ret = UBI_BAD_FASTMAP;
918 fm_size = ubi->leb_size * used_blocks;
919 if (fm_size != ubi->fm_size) {
920 ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
921 fm_size, ubi->fm_size);
922 ret = UBI_BAD_FASTMAP;
926 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
932 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
938 for (i = 0; i < used_blocks; i++) {
941 pnum = be32_to_cpu(fmsb->block_loc[i]);
943 if (ubi_io_is_bad(ubi, pnum)) {
944 ret = UBI_BAD_FASTMAP;
948 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
949 if (ret && ret != UBI_IO_BITFLIPS) {
950 ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
953 ret = UBI_BAD_FASTMAP;
955 } else if (ret == UBI_IO_BITFLIPS)
956 fm->to_be_tortured[i] = 1;
958 image_seq = be32_to_cpu(ech->image_seq);
960 ubi->image_seq = image_seq;
963 * Older UBI implementations have image_seq set to zero, so
964 * we shouldn't fail if image_seq == 0.
966 if (image_seq && (image_seq != ubi->image_seq)) {
967 ubi_err(ubi, "wrong image seq:%d instead of %d",
968 be32_to_cpu(ech->image_seq), ubi->image_seq);
969 ret = UBI_BAD_FASTMAP;
973 ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
974 if (ret && ret != UBI_IO_BITFLIPS) {
975 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
981 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
982 ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
983 be32_to_cpu(vh->vol_id),
984 UBI_FM_SB_VOLUME_ID);
985 ret = UBI_BAD_FASTMAP;
989 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
990 ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
991 be32_to_cpu(vh->vol_id),
992 UBI_FM_DATA_VOLUME_ID);
993 ret = UBI_BAD_FASTMAP;
998 if (sqnum < be64_to_cpu(vh->sqnum))
999 sqnum = be64_to_cpu(vh->sqnum);
1001 ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
1002 ubi->leb_start, ubi->leb_size);
1003 if (ret && ret != UBI_IO_BITFLIPS) {
1004 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
1005 "err: %i)", i, pnum, ret);
1013 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
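/* Verify the fastmap CRC: save the stored value and zero the field
 * first, because the CRC was computed with data_crc set to 0 when
 * the fastmap was written.
 */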
1014 tmp_crc = be32_to_cpu(fmsb2->data_crc);
1015 fmsb2->data_crc = 0;
1016 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1017 if (crc != tmp_crc) {
1018 ubi_err(ubi, "fastmap data CRC is invalid");
1019 ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
1021 ret = UBI_BAD_FASTMAP;
1025 fmsb2->sqnum = sqnum;
1027 fm->used_blocks = used_blocks;
1029 ret = ubi_attach_fastmap(ubi, ai, fm);
1032 ret = UBI_BAD_FASTMAP;
1036 for (i = 0; i < used_blocks; i++) {
1037 struct ubi_wl_entry *e;
1039 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1048 e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1049 e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1054 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1055 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1056 ubi_msg(ubi, "attached by fastmap");
1057 ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
1058 ubi_msg(ubi, "fastmap WL pool size: %d",
1059 ubi->fm_wl_pool.max_size);
1060 ubi->fm_disabled = 0;
1062 ubi_free_vid_hdr(ubi, vh);
1065 mutex_unlock(&ubi->fm_mutex);
1066 if (ret == UBI_BAD_FASTMAP)
1067 ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
1071 ubi_free_vid_hdr(ubi, vh);
1080 * ubi_write_fastmap - writes a fastmap.
1081 * @ubi: UBI device object
1082 * @new_fm: the fastmap to be written
1084 * Returns 0 on success, < 0 indicates an internal error.
1086 static int ubi_write_fastmap(struct ubi_device *ubi,
1087 struct ubi_fastmap_layout *new_fm)
1091 struct ubi_fm_sb *fmsb;
1092 struct ubi_fm_hdr *fmh;
1093 struct ubi_fm_scan_pool *fmpl1, *fmpl2;
1094 struct ubi_fm_ec *fec;
1095 struct ubi_fm_volhdr *fvh;
1096 struct ubi_fm_eba *feba;
1097 struct rb_node *node;
1098 struct ubi_wl_entry *wl_e;
1099 struct ubi_volume *vol;
1100 struct ubi_vid_hdr *avhdr, *dvhdr;
1101 struct ubi_work *ubi_wrk;
1102 int ret, i, j, free_peb_count, used_peb_count, vol_count;
1103 int scrub_peb_count, erase_peb_count;
1105 fm_raw = ubi->fm_buf;
1106 memset(ubi->fm_buf, 0, ubi->fm_size);
1108 avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1114 dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
1120 spin_lock(&ubi->volumes_lock);
1121 spin_lock(&ubi->wl_lock);
1123 fmsb = (struct ubi_fm_sb *)fm_raw;
1124 fm_pos += sizeof(*fmsb);
1125 ubi_assert(fm_pos <= ubi->fm_size);
1127 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1128 fm_pos += sizeof(*fmh);
1129 ubi_assert(fm_pos <= ubi->fm_size);
1131 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1132 fmsb->version = UBI_FM_FMT_VERSION;
1133 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1134 /* the max sqnum will be filled in while *reading* the fastmap */
1137 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1140 scrub_peb_count = 0;
1141 erase_peb_count = 0;
1144 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1145 fm_pos += sizeof(*fmpl1);
1146 fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1147 fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
1148 fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1150 for (i = 0; i < ubi->fm_pool.size; i++)
1151 fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1153 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1154 fm_pos += sizeof(*fmpl2);
1155 fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1156 fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
1157 fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1159 for (i = 0; i < ubi->fm_wl_pool.size; i++)
1160 fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
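/* Dump the wear-leveling state: free PEBs, used PEBs (including the
 * protection queue), PEBs scheduled for scrubbing and PEBs queued for
 * erasure, each together with its erase counter.
 */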
1162 for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
1163 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1164 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1166 fec->pnum = cpu_to_be32(wl_e->pnum);
1167 fec->ec = cpu_to_be32(wl_e->ec);
1170 fm_pos += sizeof(*fec);
1171 ubi_assert(fm_pos <= ubi->fm_size);
1173 fmh->free_peb_count = cpu_to_be32(free_peb_count);
1175 for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
1176 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1177 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1179 fec->pnum = cpu_to_be32(wl_e->pnum);
1180 fec->ec = cpu_to_be32(wl_e->ec);
1183 fm_pos += sizeof(*fec);
1184 ubi_assert(fm_pos <= ubi->fm_size);
1187 for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) {
1188 list_for_each_entry(wl_e, &ubi->pq[i], u.list) {
1189 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1191 fec->pnum = cpu_to_be32(wl_e->pnum);
1192 fec->ec = cpu_to_be32(wl_e->ec);
1195 fm_pos += sizeof(*fec);
1196 ubi_assert(fm_pos <= ubi->fm_size);
1199 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1201 for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
1202 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1203 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1205 fec->pnum = cpu_to_be32(wl_e->pnum);
1206 fec->ec = cpu_to_be32(wl_e->ec);
1209 fm_pos += sizeof(*fec);
1210 ubi_assert(fm_pos <= ubi->fm_size);
1212 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1215 list_for_each_entry(ubi_wrk, &ubi->works, list) {
1216 if (ubi_is_erase_work(ubi_wrk)) {
1220 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1222 fec->pnum = cpu_to_be32(wl_e->pnum);
1223 fec->ec = cpu_to_be32(wl_e->ec);
1226 fm_pos += sizeof(*fec);
1227 ubi_assert(fm_pos <= ubi->fm_size);
1230 fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1232 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1233 vol = ubi->volumes[i];
1240 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1241 fm_pos += sizeof(*fvh);
1242 ubi_assert(fm_pos <= ubi->fm_size);
1244 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1245 fvh->vol_id = cpu_to_be32(vol->vol_id);
1246 fvh->vol_type = vol->vol_type;
1247 fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1248 fvh->data_pad = cpu_to_be32(vol->data_pad);
1249 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1251 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1252 vol->vol_type == UBI_STATIC_VOLUME);
1254 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1255 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1256 ubi_assert(fm_pos <= ubi->fm_size);
1258 for (j = 0; j < vol->reserved_pebs; j++)
1259 feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
1261 feba->reserved_pebs = cpu_to_be32(j);
1262 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1264 fmh->vol_count = cpu_to_be32(vol_count);
1265 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1267 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1270 spin_unlock(&ubi->wl_lock);
1271 spin_unlock(&ubi->volumes_lock);
1273 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1274 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
1276 ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
1280 for (i = 0; i < new_fm->used_blocks; i++) {
1281 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1282 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1286 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1289 for (i = 1; i < new_fm->used_blocks; i++) {
1290 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1291 dvhdr->lnum = cpu_to_be32(i);
1292 dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1293 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1294 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
1296 ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
1297 new_fm->e[i]->pnum);
1302 for (i = 0; i < new_fm->used_blocks; i++) {
1303 ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
1304 new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
1306 ubi_err(ubi, "unable to write fastmap to PEB %i!",
1307 new_fm->e[i]->pnum);
1315 dbg_bld("fastmap written!");
1318 ubi_free_vid_hdr(ubi, avhdr);
1319 ubi_free_vid_hdr(ubi, dvhdr);
1325 * erase_block - Manually erase a PEB.
1326 * @ubi: UBI device object
1327 * @pnum: PEB to be erased
1329 * Returns the new EC value on success, < 0 indicates an internal error.
1331 static int erase_block(struct ubi_device *ubi, int pnum)
1334 struct ubi_ec_hdr *ec_hdr;
1337 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1341 ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1344 else if (ret && ret != UBI_IO_BITFLIPS) {
1349 ret = ubi_io_sync_erase(ubi, pnum, 0);
1353 ec = be64_to_cpu(ec_hdr->ec);
1355 if (ec > UBI_MAX_ERASECOUNTER) {
1360 ec_hdr->ec = cpu_to_be64(ec);
1361 ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1372 * invalidate_fastmap - destroys a fastmap.
1373 * @ubi: UBI device object
1374 * @fm: the fastmap to be destroyed
1376 * Returns 0 on success, < 0 indicates an internal error.
1378 static int invalidate_fastmap(struct ubi_device *ubi,
1379 struct ubi_fastmap_layout *fm)
1382 struct ubi_vid_hdr *vh;
1384 ret = erase_block(ubi, fm->e[0]->pnum);
1388 vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1392 /* deleting the current fastmap SB is not enough, an old SB may exist,
1393 * so create a (corrupted) SB such that fastmap will find it and fall
1394 * back to scanning mode in any case */
1395 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1396 ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
1402 * ubi_update_fastmap - will be called by UBI if a volume changes or
1403 * a fastmap pool becomes full.
1404 * @ubi: UBI device object
1406 * Returns 0 on success, < 0 indicates an internal error.
1408 int ubi_update_fastmap(struct ubi_device *ubi)
1411 struct ubi_fastmap_layout *new_fm, *old_fm;
1412 struct ubi_wl_entry *tmp_e;
1414 mutex_lock(&ubi->fm_mutex);
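/* Refill both fastmap pools up front; the fastmap assembled below
 * records their current contents.
 */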
1416 ubi_refill_pools(ubi);
1418 if (ubi->ro_mode || ubi->fm_disabled) {
1419 mutex_unlock(&ubi->fm_mutex);
1423 ret = ubi_ensure_anchor_pebs(ubi);
1425 mutex_unlock(&ubi->fm_mutex);
1429 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1431 mutex_unlock(&ubi->fm_mutex);
1435 new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
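/* Note: ubi->fm_size is a whole multiple of leb_size because
 * ubi_calc_fm_size() rounds it up, so the division above is exact.
 */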
1439 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1440 ubi_err(ubi, "fastmap too large");
1445 for (i = 1; i < new_fm->used_blocks; i++) {
1446 spin_lock(&ubi->wl_lock);
1447 tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1448 spin_unlock(&ubi->wl_lock);
1450 if (!tmp_e && !old_fm) {
1452 ubi_err(ubi, "could not get any free erase block");
1454 for (j = 1; j < i; j++)
1455 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1459 } else if (!tmp_e && old_fm) {
1460 ret = erase_block(ubi, old_fm->e[i]->pnum);
1464 for (j = 1; j < i; j++)
1465 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1468 ubi_err(ubi, "could not erase old fastmap PEB");
1471 new_fm->e[i] = old_fm->e[i];
1473 new_fm->e[i] = tmp_e;
1476 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1477 old_fm->to_be_tortured[i]);
1481 spin_lock(&ubi->wl_lock);
1482 tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1483 spin_unlock(&ubi->wl_lock);
1486 /* no fresh anchor PEB was found, reuse the old one */
1488 ret = erase_block(ubi, old_fm->e[0]->pnum);
1491 ubi_err(ubi, "could not erase old anchor PEB");
1493 for (i = 1; i < new_fm->used_blocks; i++)
1494 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1498 new_fm->e[0] = old_fm->e[0];
1499 new_fm->e[0]->ec = ret;
1501 /* we've got a new anchor PEB, return the old one */
1502 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1503 old_fm->to_be_tortured[0]);
1504 new_fm->e[0] = tmp_e;
1509 ubi_err(ubi, "could not find any anchor PEB");
1511 for (i = 1; i < new_fm->used_blocks; i++)
1512 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1517 new_fm->e[0] = tmp_e;
1520 down_write(&ubi->work_sem);
1521 down_write(&ubi->fm_sem);
1522 ret = ubi_write_fastmap(ubi, new_fm);
1523 up_write(&ubi->fm_sem);
1524 up_write(&ubi->work_sem);
1530 mutex_unlock(&ubi->fm_mutex);
1537 ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
1541 ret = invalidate_fastmap(ubi, old_fm);
1543 ubi_err(ubi, "Unable to invalidiate current fastmap!");