2 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/blkdev.h>
20 #include <linux/ratelimit.h>
24 #include "ordered-data.h"
25 #include "transaction.h"
27 #include "extent_io.h"
28 #include "dev-replace.h"
29 #include "check-integrity.h"
30 #include "rcu-string.h"
34 * This is only the first step towards a full-featured scrub. It reads all
35 * extents and super blocks and verifies the checksums. In case a bad checksum
36 * is found or the extent cannot be read, good data will be written back if
39 * Future enhancements:
40 * - In case an unrepairable extent is encountered, track which files are
41 * affected and report them
42 * - track and record media errors, throw out bad devices
43 * - add a mode to also read unallocated space
50 * the following three values only influence the performance.
51 * The last one configures the number of parallel and outstanding I/O
52 * operations. The first two values configure an upper limit for the number
53 * of (dynamically allocated) pages that are added to a bio.
55 #define SCRUB_PAGES_PER_RD_BIO 32 /* 128k per bio */
56 #define SCRUB_PAGES_PER_WR_BIO 32 /* 128k per bio */
57 #define SCRUB_BIOS_PER_SCTX 64 /* 8MB per device in flight */
60 * the following value times PAGE_SIZE needs to be large enough to match the
61 * largest node/leaf/sector size that shall be supported.
62 * Values larger than BTRFS_STRIPE_LEN are not supported.
64 #define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */
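/*
 * Illustrative sizing for the constants above, assuming PAGE_SIZE == 4 KiB
 * (the case the per-define comments refer to; other page sizes give other
 * byte values):
 *   SCRUB_PAGES_PER_RD_BIO  * PAGE_SIZE  = 32 * 4 KiB   = 128 KiB per read bio
 *   SCRUB_PAGES_PER_WR_BIO  * PAGE_SIZE  = 32 * 4 KiB   = 128 KiB per write bio
 *   SCRUB_BIOS_PER_SCTX     * 128 KiB    = 64 * 128 KiB = 8 MiB in flight per device
 *   SCRUB_MAX_PAGES_PER_BLOCK * PAGE_SIZE = 16 * 4 KiB  = 64 KiB max node/leaf/sector
 */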
67 struct scrub_block *sblock;
69 struct btrfs_device *dev;
70 u64 flags; /* extent flags */
74 u64 physical_for_dev_replace;
77 unsigned int mirror_num:8;
78 unsigned int have_csum:1;
79 unsigned int io_error:1;
81 u8 csum[BTRFS_CSUM_SIZE];
86 struct scrub_ctx *sctx;
87 struct btrfs_device *dev;
92 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
93 struct scrub_page *pagev[SCRUB_PAGES_PER_WR_BIO];
95 struct scrub_page *pagev[SCRUB_PAGES_PER_RD_BIO];
99 struct btrfs_work work;
103 struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
105 atomic_t outstanding_pages;
106 atomic_t ref_count; /* free mem on transition to zero */
107 struct scrub_ctx *sctx;
109 unsigned int header_error:1;
110 unsigned int checksum_error:1;
111 unsigned int no_io_error_seen:1;
112 unsigned int generation_error:1; /* also sets header_error */
116 struct scrub_wr_ctx {
117 struct scrub_bio *wr_curr_bio;
118 struct btrfs_device *tgtdev;
119 int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
120 atomic_t flush_all_writes;
121 struct mutex wr_lock;
125 struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX];
126 struct btrfs_root *dev_root;
129 atomic_t bios_in_flight;
130 atomic_t workers_pending;
131 spinlock_t list_lock;
132 wait_queue_head_t list_wait;
134 struct list_head csum_list;
137 int pages_per_rd_bio;
143 struct scrub_wr_ctx wr_ctx;
148 struct btrfs_scrub_progress stat;
149 spinlock_t stat_lock;
152 struct scrub_fixup_nodatasum {
153 struct scrub_ctx *sctx;
154 struct btrfs_device *dev;
156 struct btrfs_root *root;
157 struct btrfs_work work;
161 struct scrub_copy_nocow_ctx {
162 struct scrub_ctx *sctx;
166 u64 physical_for_dev_replace;
167 struct btrfs_work work;
170 struct scrub_warning {
171 struct btrfs_path *path;
172 u64 extent_item_size;
178 struct btrfs_device *dev;
184 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
185 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
186 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
187 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
188 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
189 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
190 struct btrfs_fs_info *fs_info,
191 struct scrub_block *original_sblock,
192 u64 length, u64 logical,
193 struct scrub_block *sblocks_for_recheck);
194 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
195 struct scrub_block *sblock, int is_metadata,
196 int have_csum, u8 *csum, u64 generation,
198 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
199 struct scrub_block *sblock,
200 int is_metadata, int have_csum,
201 const u8 *csum, u64 generation,
203 static void scrub_complete_bio_end_io(struct bio *bio, int err);
204 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
205 struct scrub_block *sblock_good,
207 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
208 struct scrub_block *sblock_good,
209 int page_num, int force_write);
210 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
211 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
213 static int scrub_checksum_data(struct scrub_block *sblock);
214 static int scrub_checksum_tree_block(struct scrub_block *sblock);
215 static int scrub_checksum_super(struct scrub_block *sblock);
216 static void scrub_block_get(struct scrub_block *sblock);
217 static void scrub_block_put(struct scrub_block *sblock);
218 static void scrub_page_get(struct scrub_page *spage);
219 static void scrub_page_put(struct scrub_page *spage);
220 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
221 struct scrub_page *spage);
222 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
223 u64 physical, struct btrfs_device *dev, u64 flags,
224 u64 gen, int mirror_num, u8 *csum, int force,
225 u64 physical_for_dev_replace);
226 static void scrub_bio_end_io(struct bio *bio, int err);
227 static void scrub_bio_end_io_worker(struct btrfs_work *work);
228 static void scrub_block_complete(struct scrub_block *sblock);
229 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
230 u64 extent_logical, u64 extent_len,
231 u64 *extent_physical,
232 struct btrfs_device **extent_dev,
233 int *extent_mirror_num);
234 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
235 struct scrub_wr_ctx *wr_ctx,
236 struct btrfs_fs_info *fs_info,
237 struct btrfs_device *dev,
239 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
240 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
241 struct scrub_page *spage);
242 static void scrub_wr_submit(struct scrub_ctx *sctx);
243 static void scrub_wr_bio_end_io(struct bio *bio, int err);
244 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
245 static int write_page_nocow(struct scrub_ctx *sctx,
246 u64 physical_for_dev_replace, struct page *page);
247 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
249 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
250 int mirror_num, u64 physical_for_dev_replace);
251 static void copy_nocow_pages_worker(struct btrfs_work *work);
254 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
256 atomic_inc(&sctx->bios_in_flight);
259 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
261 atomic_dec(&sctx->bios_in_flight);
262 wake_up(&sctx->list_wait);
266 * used for workers that require transaction commits (i.e., for the
269 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
271 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
274 * increment scrubs_running to prevent cancel requests from
275 * completing as long as a worker is running. we must also
276 * increment scrubs_paused to prevent deadlocking on pause
277 * requests used for transaction commits (as the worker uses a
278 * transaction context). it is safe to regard the worker
279 * as paused for all practical matters. effectively, we only
280 * prevent cancellation requests from completing.
282 mutex_lock(&fs_info->scrub_lock);
283 atomic_inc(&fs_info->scrubs_running);
284 atomic_inc(&fs_info->scrubs_paused);
285 mutex_unlock(&fs_info->scrub_lock);
286 atomic_inc(&sctx->workers_pending);
289 /* used for workers that require transaction commits */
290 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
292 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
295 * see scrub_pending_trans_workers_inc() for why we're pretending
296 * to be paused in the scrub counters
298 mutex_lock(&fs_info->scrub_lock);
299 atomic_dec(&fs_info->scrubs_running);
300 atomic_dec(&fs_info->scrubs_paused);
301 mutex_unlock(&fs_info->scrub_lock);
302 atomic_dec(&sctx->workers_pending);
303 wake_up(&fs_info->scrub_pause_wait);
304 wake_up(&sctx->list_wait);
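/*
 * Usage sketch for the two helpers above, based on the call sites in this
 * file: a submitter calls scrub_pending_trans_workers_inc() right before
 * queueing work that may join a transaction (see the nodatasum fixup path
 * below), and the worker calls scrub_pending_trans_workers_dec() as its last
 * step, which wakes up waiters on scrub_pause_wait and list_wait.
 */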
307 static void scrub_free_csums(struct scrub_ctx *sctx)
309 while (!list_empty(&sctx->csum_list)) {
310 struct btrfs_ordered_sum *sum;
311 sum = list_first_entry(&sctx->csum_list,
312 struct btrfs_ordered_sum, list);
313 list_del(&sum->list);
318 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
325 scrub_free_wr_ctx(&sctx->wr_ctx);
327 /* this can happen when scrub is cancelled */
328 if (sctx->curr != -1) {
329 struct scrub_bio *sbio = sctx->bios[sctx->curr];
331 for (i = 0; i < sbio->page_count; i++) {
332 WARN_ON(!sbio->pagev[i]->page);
333 scrub_block_put(sbio->pagev[i]->sblock);
338 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
339 struct scrub_bio *sbio = sctx->bios[i];
346 scrub_free_csums(sctx);
350 static noinline_for_stack
351 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
353 struct scrub_ctx *sctx;
355 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
356 int pages_per_rd_bio;
360 * the setting of pages_per_rd_bio is correct for scrub but might
361 * be wrong for the dev_replace code where we might read from
362 * different devices in the initial huge bios. However, that
363 * code is able to correctly handle the case when adding a page
367 pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
368 bio_get_nr_vecs(dev->bdev));
370 pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
371 sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
374 sctx->is_dev_replace = is_dev_replace;
375 sctx->pages_per_rd_bio = pages_per_rd_bio;
377 sctx->dev_root = dev->dev_root;
378 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
379 struct scrub_bio *sbio;
381 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
384 sctx->bios[i] = sbio;
388 sbio->page_count = 0;
389 sbio->work.func = scrub_bio_end_io_worker;
391 if (i != SCRUB_BIOS_PER_SCTX - 1)
392 sctx->bios[i]->next_free = i + 1;
394 sctx->bios[i]->next_free = -1;
396 sctx->first_free = 0;
397 sctx->nodesize = dev->dev_root->nodesize;
398 sctx->leafsize = dev->dev_root->leafsize;
399 sctx->sectorsize = dev->dev_root->sectorsize;
400 atomic_set(&sctx->bios_in_flight, 0);
401 atomic_set(&sctx->workers_pending, 0);
402 atomic_set(&sctx->cancel_req, 0);
403 sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
404 INIT_LIST_HEAD(&sctx->csum_list);
406 spin_lock_init(&sctx->list_lock);
407 spin_lock_init(&sctx->stat_lock);
408 init_waitqueue_head(&sctx->list_wait);
410 ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
411 fs_info->dev_replace.tgtdev, is_dev_replace);
413 scrub_free_ctx(sctx);
419 scrub_free_ctx(sctx);
420 return ERR_PTR(-ENOMEM);
423 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
430 struct extent_buffer *eb;
431 struct btrfs_inode_item *inode_item;
432 struct scrub_warning *swarn = warn_ctx;
433 struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
434 struct inode_fs_paths *ipath = NULL;
435 struct btrfs_root *local_root;
436 struct btrfs_key root_key;
438 root_key.objectid = root;
439 root_key.type = BTRFS_ROOT_ITEM_KEY;
440 root_key.offset = (u64)-1;
441 local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
442 if (IS_ERR(local_root)) {
443 ret = PTR_ERR(local_root);
447 ret = inode_item_info(inum, 0, local_root, swarn->path);
449 btrfs_release_path(swarn->path);
453 eb = swarn->path->nodes[0];
454 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
455 struct btrfs_inode_item);
456 isize = btrfs_inode_size(eb, inode_item);
457 nlink = btrfs_inode_nlink(eb, inode_item);
458 btrfs_release_path(swarn->path);
460 ipath = init_ipath(4096, local_root, swarn->path);
462 ret = PTR_ERR(ipath);
466 ret = paths_from_inode(inum, ipath);
472 * we deliberately ignore the fact that ipath might have been too small to
473 * hold all of the paths here
475 for (i = 0; i < ipath->fspath->elem_cnt; ++i)
476 printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
477 "%s, sector %llu, root %llu, inode %llu, offset %llu, "
478 "length %llu, links %u (path: %s)\n", swarn->errstr,
479 swarn->logical, rcu_str_deref(swarn->dev->name),
480 (unsigned long long)swarn->sector, root, inum, offset,
481 min(isize - offset, (u64)PAGE_SIZE), nlink,
482 (char *)(unsigned long)ipath->fspath->val[i]);
488 printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
489 "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
490 "resolving failed with ret=%d\n", swarn->errstr,
491 swarn->logical, rcu_str_deref(swarn->dev->name),
492 (unsigned long long)swarn->sector, root, inum, offset, ret);
498 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
500 struct btrfs_device *dev;
501 struct btrfs_fs_info *fs_info;
502 struct btrfs_path *path;
503 struct btrfs_key found_key;
504 struct extent_buffer *eb;
505 struct btrfs_extent_item *ei;
506 struct scrub_warning swarn;
507 unsigned long ptr = 0;
513 const int bufsize = 4096;
516 WARN_ON(sblock->page_count < 1);
517 dev = sblock->pagev[0]->dev;
518 fs_info = sblock->sctx->dev_root->fs_info;
520 path = btrfs_alloc_path();
522 swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
523 swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
524 swarn.sector = (sblock->pagev[0]->physical) >> 9;
525 swarn.logical = sblock->pagev[0]->logical;
526 swarn.errstr = errstr;
528 swarn.msg_bufsize = bufsize;
529 swarn.scratch_bufsize = bufsize;
531 if (!path || !swarn.scratch_buf || !swarn.msg_buf)
534 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
539 extent_item_pos = swarn.logical - found_key.objectid;
540 swarn.extent_item_size = found_key.offset;
543 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
544 item_size = btrfs_item_size_nr(eb, path->slots[0]);
546 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
548 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
549 item_size, &ref_root,
551 printk_in_rcu(KERN_WARNING
552 "btrfs: %s at logical %llu on dev %s, "
553 "sector %llu: metadata %s (level %d) in tree "
554 "%llu\n", errstr, swarn.logical,
555 rcu_str_deref(dev->name),
556 (unsigned long long)swarn.sector,
557 ref_level ? "node" : "leaf",
558 ret < 0 ? -1 : ref_level,
559 ret < 0 ? -1 : ref_root);
561 btrfs_release_path(path);
563 btrfs_release_path(path);
566 iterate_extent_inodes(fs_info, found_key.objectid,
568 scrub_print_warning_inode, &swarn);
572 btrfs_free_path(path);
573 kfree(swarn.scratch_buf);
574 kfree(swarn.msg_buf);
577 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
579 struct page *page = NULL;
581 struct scrub_fixup_nodatasum *fixup = fixup_ctx;
584 struct btrfs_key key;
585 struct inode *inode = NULL;
586 struct btrfs_fs_info *fs_info;
587 u64 end = offset + PAGE_SIZE - 1;
588 struct btrfs_root *local_root;
592 key.type = BTRFS_ROOT_ITEM_KEY;
593 key.offset = (u64)-1;
595 fs_info = fixup->root->fs_info;
596 srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
598 local_root = btrfs_read_fs_root_no_name(fs_info, &key);
599 if (IS_ERR(local_root)) {
600 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
601 return PTR_ERR(local_root);
604 key.type = BTRFS_INODE_ITEM_KEY;
607 inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
608 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
610 return PTR_ERR(inode);
612 index = offset >> PAGE_CACHE_SHIFT;
614 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
620 if (PageUptodate(page)) {
621 if (PageDirty(page)) {
623 * we need to write the data to the defective sector. the
624 * data that was in that sector is not in memory,
625 * because the page was modified. we must not write the
626 * modified page to that sector.
628 * TODO: what could be done here: wait for the delalloc
629 * runner to write out that page (might involve
630 * COW) and see whether the sector is still
631 * referenced afterwards.
633 * For the time being, we'll treat this error as
634 * uncorrectable, although there is a chance that a
635 * later scrub will find the bad sector again and that
636 * there's no dirty page in memory then.
641 fs_info = BTRFS_I(inode)->root->fs_info;
642 ret = repair_io_failure(fs_info, offset, PAGE_SIZE,
643 fixup->logical, page,
649 * we need to get good data first. the general readpage path
650 * will call repair_io_failure for us, we just have to make
651 * sure we read the bad mirror.
653 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
654 EXTENT_DAMAGED, GFP_NOFS);
656 /* set_extent_bits should give proper error */
663 ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
666 wait_on_page_locked(page);
668 corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
669 end, EXTENT_DAMAGED, 0, NULL);
671 clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
672 EXTENT_DAMAGED, GFP_NOFS);
684 if (ret == 0 && corrected) {
686 * we only need to call readpage for one of the inodes belonging
687 * to this extent. so make iterate_extent_inodes stop
695 static void scrub_fixup_nodatasum(struct btrfs_work *work)
698 struct scrub_fixup_nodatasum *fixup;
699 struct scrub_ctx *sctx;
700 struct btrfs_trans_handle *trans = NULL;
701 struct btrfs_fs_info *fs_info;
702 struct btrfs_path *path;
703 int uncorrectable = 0;
705 fixup = container_of(work, struct scrub_fixup_nodatasum, work);
707 fs_info = fixup->root->fs_info;
709 path = btrfs_alloc_path();
711 spin_lock(&sctx->stat_lock);
712 ++sctx->stat.malloc_errors;
713 spin_unlock(&sctx->stat_lock);
718 trans = btrfs_join_transaction(fixup->root);
725 * the idea is to trigger a regular read through the standard path. we
726 * read a page from the (failed) logical address by specifying the
727 * corresponding copynum of the failed sector. thus, that readpage is
729 * that is the point where on-the-fly error correction will kick in
730 * (once it's finished) and rewrite the failed sector if a good copy
733 ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
734 path, scrub_fixup_readpage,
742 spin_lock(&sctx->stat_lock);
743 ++sctx->stat.corrected_errors;
744 spin_unlock(&sctx->stat_lock);
747 if (trans && !IS_ERR(trans))
748 btrfs_end_transaction(trans, fixup->root);
750 spin_lock(&sctx->stat_lock);
751 ++sctx->stat.uncorrectable_errors;
752 spin_unlock(&sctx->stat_lock);
753 btrfs_dev_replace_stats_inc(
754 &sctx->dev_root->fs_info->dev_replace.
755 num_uncorrectable_read_errors);
756 printk_ratelimited_in_rcu(KERN_ERR
757 "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
758 (unsigned long long)fixup->logical,
759 rcu_str_deref(fixup->dev->name));
762 btrfs_free_path(path);
765 scrub_pending_trans_workers_dec(sctx);
769 * scrub_handle_errored_block gets called when either verification of the
770 * pages failed or the bio failed to read, e.g. with EIO. In the latter
771 * case, this function handles all pages in the bio, even though only one
773 * The goal of this function is to repair the errored block by using the
774 * contents of one of the mirrors.
776 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
778 struct scrub_ctx *sctx = sblock_to_check->sctx;
779 struct btrfs_device *dev;
780 struct btrfs_fs_info *fs_info;
784 unsigned int failed_mirror_index;
785 unsigned int is_metadata;
786 unsigned int have_csum;
788 struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
789 struct scrub_block *sblock_bad;
794 static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
795 DEFAULT_RATELIMIT_BURST);
797 BUG_ON(sblock_to_check->page_count < 1);
798 fs_info = sctx->dev_root->fs_info;
799 if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
801 * if we find an error in a super block, we just report it;
802 * super blocks get written again with the next transaction commit
805 spin_lock(&sctx->stat_lock);
806 ++sctx->stat.super_errors;
807 spin_unlock(&sctx->stat_lock);
810 length = sblock_to_check->page_count * PAGE_SIZE;
811 logical = sblock_to_check->pagev[0]->logical;
812 generation = sblock_to_check->pagev[0]->generation;
813 BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
814 failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
815 is_metadata = !(sblock_to_check->pagev[0]->flags &
816 BTRFS_EXTENT_FLAG_DATA);
817 have_csum = sblock_to_check->pagev[0]->have_csum;
818 csum = sblock_to_check->pagev[0]->csum;
819 dev = sblock_to_check->pagev[0]->dev;
821 if (sctx->is_dev_replace && !is_metadata && !have_csum) {
822 sblocks_for_recheck = NULL;
827 * read all mirrors one after the other. This includes
828 * re-reading the extent or metadata block that failed (which
829 * was the reason this fixup code was called) another time,
830 * page by page this time, in order to know which pages
831 * caused I/O errors and which ones are good (for all mirrors).
832 * The goal is to handle the situation when more than one
833 * mirror contains I/O errors, but the errors do not
834 * overlap, i.e. the data can be repaired by selecting the
835 * pages from those mirrors without I/O error on the
836 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
837 * would be that mirror #1 has an I/O error on the first page,
838 * the second page is good, and mirror #2 has an I/O error on
839 * the second page, but the first page is good.
840 * Then the first page of the first mirror can be repaired by
841 * taking the first page of the second mirror, and the
842 * second page of the second mirror can be repaired by
843 * copying the contents of the 2nd page of the 1st mirror.
844 * One more note: if the pages of one mirror contain I/O
845 * errors, the checksum cannot be verified. In order to get
846 * the best data for repairing, the first attempt is to find
847 * a mirror without I/O errors and with a validated checksum.
848 * Only if this is not possible, the pages are picked from
849 * mirrors with I/O errors without considering the checksum.
850 * If the latter is the case, at the end, the checksum of the
851 * repaired area is verified in order to correctly maintain
855 sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
856 sizeof(*sblocks_for_recheck),
858 if (!sblocks_for_recheck) {
859 spin_lock(&sctx->stat_lock);
860 sctx->stat.malloc_errors++;
861 sctx->stat.read_errors++;
862 sctx->stat.uncorrectable_errors++;
863 spin_unlock(&sctx->stat_lock);
864 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
868 /* setup the context, map the logical blocks and alloc the pages */
869 ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
870 logical, sblocks_for_recheck);
872 spin_lock(&sctx->stat_lock);
873 sctx->stat.read_errors++;
874 sctx->stat.uncorrectable_errors++;
875 spin_unlock(&sctx->stat_lock);
876 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
879 BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
880 sblock_bad = sblocks_for_recheck + failed_mirror_index;
882 /* build and submit the bios for the failed mirror, check checksums */
883 scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
884 csum, generation, sctx->csum_size);
886 if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
887 sblock_bad->no_io_error_seen) {
889 * the error disappeared after reading page by page, or
890 * the area was part of a huge bio and other parts of the
891 * bio caused I/O errors, or the block layer merged several
892 * read requests into one and the error is caused by a
893 * different bio (usually one of the two latter cases is
896 spin_lock(&sctx->stat_lock);
897 sctx->stat.unverified_errors++;
898 spin_unlock(&sctx->stat_lock);
900 if (sctx->is_dev_replace)
901 scrub_write_block_to_dev_replace(sblock_bad);
905 if (!sblock_bad->no_io_error_seen) {
906 spin_lock(&sctx->stat_lock);
907 sctx->stat.read_errors++;
908 spin_unlock(&sctx->stat_lock);
909 if (__ratelimit(&_rs))
910 scrub_print_warning("i/o error", sblock_to_check);
911 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
912 } else if (sblock_bad->checksum_error) {
913 spin_lock(&sctx->stat_lock);
914 sctx->stat.csum_errors++;
915 spin_unlock(&sctx->stat_lock);
916 if (__ratelimit(&_rs))
917 scrub_print_warning("checksum error", sblock_to_check);
918 btrfs_dev_stat_inc_and_print(dev,
919 BTRFS_DEV_STAT_CORRUPTION_ERRS);
920 } else if (sblock_bad->header_error) {
921 spin_lock(&sctx->stat_lock);
922 sctx->stat.verify_errors++;
923 spin_unlock(&sctx->stat_lock);
924 if (__ratelimit(&_rs))
925 scrub_print_warning("checksum/header error",
927 if (sblock_bad->generation_error)
928 btrfs_dev_stat_inc_and_print(dev,
929 BTRFS_DEV_STAT_GENERATION_ERRS);
931 btrfs_dev_stat_inc_and_print(dev,
932 BTRFS_DEV_STAT_CORRUPTION_ERRS);
935 if (sctx->readonly && !sctx->is_dev_replace)
936 goto did_not_correct_error;
938 if (!is_metadata && !have_csum) {
939 struct scrub_fixup_nodatasum *fixup_nodatasum;
942 WARN_ON(sctx->is_dev_replace);
945 * !is_metadata and !have_csum, this means that the data
946 * might not be COW'ed, that it might be modified
947 * concurrently. The general strategy to work on the
948 * commit root does not help in the case when COW is not
951 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
952 if (!fixup_nodatasum)
953 goto did_not_correct_error;
954 fixup_nodatasum->sctx = sctx;
955 fixup_nodatasum->dev = dev;
956 fixup_nodatasum->logical = logical;
957 fixup_nodatasum->root = fs_info->extent_root;
958 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
959 scrub_pending_trans_workers_inc(sctx);
960 fixup_nodatasum->work.func = scrub_fixup_nodatasum;
961 btrfs_queue_worker(&fs_info->scrub_workers,
962 &fixup_nodatasum->work);
967 * now build and submit the bios for the other mirrors, check
969 * First try to pick the mirror which is completely without I/O
970 * errors and also does not have a checksum error.
971 * If one is found, and if a checksum is present, the full block
972 * that is known to contain an error is rewritten. Afterwards
973 * the block is known to be corrected.
974 * If a mirror is found which is completely correct, and no
975 * checksum is present, only those pages are rewritten that had
976 * an I/O error in the block to be repaired, since it cannot be
977 * determined which copy of the other pages is better (and it
978 * could happen otherwise that a correct page would be
979 * overwritten by a bad one).
981 for (mirror_index = 0;
982 mirror_index < BTRFS_MAX_MIRRORS &&
983 sblocks_for_recheck[mirror_index].page_count > 0;
985 struct scrub_block *sblock_other;
987 if (mirror_index == failed_mirror_index)
989 sblock_other = sblocks_for_recheck + mirror_index;
991 /* build and submit the bios, check checksums */
992 scrub_recheck_block(fs_info, sblock_other, is_metadata,
993 have_csum, csum, generation,
996 if (!sblock_other->header_error &&
997 !sblock_other->checksum_error &&
998 sblock_other->no_io_error_seen) {
999 if (sctx->is_dev_replace) {
1000 scrub_write_block_to_dev_replace(sblock_other);
1002 int force_write = is_metadata || have_csum;
1004 ret = scrub_repair_block_from_good_copy(
1005 sblock_bad, sblock_other,
1009 goto corrected_error;
1014 * for dev_replace, pick good pages and write to the target device.
1016 if (sctx->is_dev_replace) {
1018 for (page_num = 0; page_num < sblock_bad->page_count;
1023 for (mirror_index = 0;
1024 mirror_index < BTRFS_MAX_MIRRORS &&
1025 sblocks_for_recheck[mirror_index].page_count > 0;
1027 struct scrub_block *sblock_other =
1028 sblocks_for_recheck + mirror_index;
1029 struct scrub_page *page_other =
1030 sblock_other->pagev[page_num];
1032 if (!page_other->io_error) {
1033 ret = scrub_write_page_to_dev_replace(
1034 sblock_other, page_num);
1036 /* succeeded for this page */
1040 btrfs_dev_replace_stats_inc(
1042 fs_info->dev_replace.
1050 * did not find a mirror to fetch the page
1051 * from. scrub_write_page_to_dev_replace()
1052 * handles this case (page->io_error), by
1053 * filling the block with zeros before
1054 * submitting the write request
1057 ret = scrub_write_page_to_dev_replace(
1058 sblock_bad, page_num);
1060 btrfs_dev_replace_stats_inc(
1061 &sctx->dev_root->fs_info->
1062 dev_replace.num_write_errors);
1070 * for regular scrub, repair those pages that are errored.
1071 * In case of I/O errors in the area that is supposed to be
1072 * repaired, continue by picking good copies of those pages.
1073 * Select the good pages from mirrors to rewrite bad pages from
1074 * the area to fix. Afterwards verify the checksum of the block
1075 * that is supposed to be repaired. This verification step is
1076 * only done for the purpose of statistics counting and for the
1077 * final scrub report on whether errors remain.
1078 * A perfect algorithm could make use of the checksum and try
1079 * all possible combinations of pages from the different mirrors
1080 * until the checksum verification succeeds. For example, when
1081 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1082 * of mirror #2 is readable but the final checksum test fails,
1083 * then the 2nd page of mirror #3 could be tried, to see whether
1084 * the final checksum now succeeds. But this would be a rare
1085 * exception and is therefore not implemented. At least it is
1086 * ensured that the good copy is never overwritten.
1087 * A more useful improvement would be to pick the sectors
1088 * without I/O errors based on sector sizes (512 bytes on legacy
1089 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
1090 * mirror could be repaired by taking 512 bytes of a different
1091 * mirror, even if other 512-byte sectors in the same PAGE_SIZE
1092 * area are unreadable.
1095 /* can only fix I/O errors from here on */
1096 if (sblock_bad->no_io_error_seen)
1097 goto did_not_correct_error;
1100 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1101 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1103 if (!page_bad->io_error)
1106 for (mirror_index = 0;
1107 mirror_index < BTRFS_MAX_MIRRORS &&
1108 sblocks_for_recheck[mirror_index].page_count > 0;
1110 struct scrub_block *sblock_other = sblocks_for_recheck +
1112 struct scrub_page *page_other = sblock_other->pagev[
1115 if (!page_other->io_error) {
1116 ret = scrub_repair_page_from_good_copy(
1117 sblock_bad, sblock_other, page_num, 0);
1119 page_bad->io_error = 0;
1120 break; /* succeeded for this page */
1125 if (page_bad->io_error) {
1126 /* did not find a mirror to copy the page from */
1132 if (is_metadata || have_csum) {
1134 * need to verify the checksum now that all
1135 * sectors on disk are repaired (the write
1136 * request for data to be repaired is on its way).
1137 * Just be lazy and use scrub_recheck_block()
1138 * which re-reads the data before the checksum
1139 * is verified, but most likely the data comes out
1140 * of the page cache.
1142 scrub_recheck_block(fs_info, sblock_bad,
1143 is_metadata, have_csum, csum,
1144 generation, sctx->csum_size);
1145 if (!sblock_bad->header_error &&
1146 !sblock_bad->checksum_error &&
1147 sblock_bad->no_io_error_seen)
1148 goto corrected_error;
1150 goto did_not_correct_error;
1153 spin_lock(&sctx->stat_lock);
1154 sctx->stat.corrected_errors++;
1155 spin_unlock(&sctx->stat_lock);
1156 printk_ratelimited_in_rcu(KERN_ERR
1157 "btrfs: fixed up error at logical %llu on dev %s\n",
1158 (unsigned long long)logical,
1159 rcu_str_deref(dev->name));
1162 did_not_correct_error:
1163 spin_lock(&sctx->stat_lock);
1164 sctx->stat.uncorrectable_errors++;
1165 spin_unlock(&sctx->stat_lock);
1166 printk_ratelimited_in_rcu(KERN_ERR
1167 "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
1168 (unsigned long long)logical,
1169 rcu_str_deref(dev->name));
1173 if (sblocks_for_recheck) {
1174 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1176 struct scrub_block *sblock = sblocks_for_recheck +
1180 for (page_index = 0; page_index < sblock->page_count;
1182 sblock->pagev[page_index]->sblock = NULL;
1183 scrub_page_put(sblock->pagev[page_index]);
1186 kfree(sblocks_for_recheck);
1192 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
1193 struct btrfs_fs_info *fs_info,
1194 struct scrub_block *original_sblock,
1195 u64 length, u64 logical,
1196 struct scrub_block *sblocks_for_recheck)
1203 * note: the two members ref_count and outstanding_pages
1204 * are not used (and not set) in the blocks that are used for
1205 * the recheck procedure
1209 while (length > 0) {
1210 u64 sublen = min_t(u64, length, PAGE_SIZE);
1211 u64 mapped_length = sublen;
1212 struct btrfs_bio *bbio = NULL;
1215 * with a length of PAGE_SIZE, each returned stripe
1216 * represents one mirror
1218 ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical,
1219 &mapped_length, &bbio, 0);
1220 if (ret || !bbio || mapped_length < sublen) {
1225 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
1226 for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
1228 struct scrub_block *sblock;
1229 struct scrub_page *page;
1231 if (mirror_index >= BTRFS_MAX_MIRRORS)
1234 sblock = sblocks_for_recheck + mirror_index;
1235 sblock->sctx = sctx;
1236 page = kzalloc(sizeof(*page), GFP_NOFS);
1239 spin_lock(&sctx->stat_lock);
1240 sctx->stat.malloc_errors++;
1241 spin_unlock(&sctx->stat_lock);
1245 scrub_page_get(page);
1246 sblock->pagev[page_index] = page;
1247 page->logical = logical;
1248 page->physical = bbio->stripes[mirror_index].physical;
1249 BUG_ON(page_index >= original_sblock->page_count);
1250 page->physical_for_dev_replace =
1251 original_sblock->pagev[page_index]->
1252 physical_for_dev_replace;
1253 /* for missing devices, dev->bdev is NULL */
1254 page->dev = bbio->stripes[mirror_index].dev;
1255 page->mirror_num = mirror_index + 1;
1256 sblock->page_count++;
1257 page->page = alloc_page(GFP_NOFS);
1271 * this function will check the on-disk data for checksum errors, header
1272 * errors and read I/O errors. If any I/O errors happen, the exact pages
1273 * which are errored are marked as being bad. The goal is to enable scrub
1274 * to take those pages that are not errored from all the mirrors so that
1275 * the pages that are errored in the just handled mirror can be repaired.
1277 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1278 struct scrub_block *sblock, int is_metadata,
1279 int have_csum, u8 *csum, u64 generation,
1284 sblock->no_io_error_seen = 1;
1285 sblock->header_error = 0;
1286 sblock->checksum_error = 0;
1288 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1290 struct scrub_page *page = sblock->pagev[page_num];
1291 DECLARE_COMPLETION_ONSTACK(complete);
1293 if (page->dev->bdev == NULL) {
1295 sblock->no_io_error_seen = 0;
1299 WARN_ON(!page->page);
1300 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1303 sblock->no_io_error_seen = 0;
1306 bio->bi_bdev = page->dev->bdev;
1307 bio->bi_sector = page->physical >> 9;
1308 bio->bi_end_io = scrub_complete_bio_end_io;
1309 bio->bi_private = &complete;
1311 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1312 btrfsic_submit_bio(READ, bio);
1314 /* this will also unplug the queue */
1315 wait_for_completion(&complete);
1317 page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
1318 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1319 sblock->no_io_error_seen = 0;
1323 if (sblock->no_io_error_seen)
1324 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1325 have_csum, csum, generation,
1331 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1332 struct scrub_block *sblock,
1333 int is_metadata, int have_csum,
1334 const u8 *csum, u64 generation,
1338 u8 calculated_csum[BTRFS_CSUM_SIZE];
1340 void *mapped_buffer;
1342 WARN_ON(!sblock->pagev[0]->page);
1344 struct btrfs_header *h;
1346 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1347 h = (struct btrfs_header *)mapped_buffer;
1349 if (sblock->pagev[0]->logical != le64_to_cpu(h->bytenr) ||
1350 memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
1351 memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1353 sblock->header_error = 1;
1354 } else if (generation != le64_to_cpu(h->generation)) {
1355 sblock->header_error = 1;
1356 sblock->generation_error = 1;
1363 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1366 for (page_num = 0;;) {
1367 if (page_num == 0 && is_metadata)
1368 crc = btrfs_csum_data(
1369 ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1370 crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1372 crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
1374 kunmap_atomic(mapped_buffer);
1376 if (page_num >= sblock->page_count)
1378 WARN_ON(!sblock->pagev[page_num]->page);
1380 mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
1383 btrfs_csum_final(crc, calculated_csum);
1384 if (memcmp(calculated_csum, csum, csum_size))
1385 sblock->checksum_error = 1;
1388 static void scrub_complete_bio_end_io(struct bio *bio, int err)
1390 complete((struct completion *)bio->bi_private);
1393 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1394 struct scrub_block *sblock_good,
1400 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1403 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1414 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1415 struct scrub_block *sblock_good,
1416 int page_num, int force_write)
1418 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1419 struct scrub_page *page_good = sblock_good->pagev[page_num];
1421 BUG_ON(page_bad->page == NULL);
1422 BUG_ON(page_good->page == NULL);
1423 if (force_write || sblock_bad->header_error ||
1424 sblock_bad->checksum_error || page_bad->io_error) {
1427 DECLARE_COMPLETION_ONSTACK(complete);
1429 if (!page_bad->dev->bdev) {
1430 printk_ratelimited(KERN_WARNING
1431 "btrfs: scrub_repair_page_from_good_copy(bdev == NULL) is unexpected!\n");
1435 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1438 bio->bi_bdev = page_bad->dev->bdev;
1439 bio->bi_sector = page_bad->physical >> 9;
1440 bio->bi_end_io = scrub_complete_bio_end_io;
1441 bio->bi_private = &complete;
1443 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1444 if (PAGE_SIZE != ret) {
1448 btrfsic_submit_bio(WRITE, bio);
1450 /* this will also unplug the queue */
1451 wait_for_completion(&complete);
1452 if (!bio_flagged(bio, BIO_UPTODATE)) {
1453 btrfs_dev_stat_inc_and_print(page_bad->dev,
1454 BTRFS_DEV_STAT_WRITE_ERRS);
1455 btrfs_dev_replace_stats_inc(
1456 &sblock_bad->sctx->dev_root->fs_info->
1457 dev_replace.num_write_errors);
1467 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1471 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1474 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1476 btrfs_dev_replace_stats_inc(
1477 &sblock->sctx->dev_root->fs_info->dev_replace.
1482 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1485 struct scrub_page *spage = sblock->pagev[page_num];
1487 BUG_ON(spage->page == NULL);
1488 if (spage->io_error) {
1489 void *mapped_buffer = kmap_atomic(spage->page);
1491 memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
1492 flush_dcache_page(spage->page);
1493 kunmap_atomic(mapped_buffer);
1495 return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1498 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1499 struct scrub_page *spage)
1501 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1502 struct scrub_bio *sbio;
1505 mutex_lock(&wr_ctx->wr_lock);
1507 if (!wr_ctx->wr_curr_bio) {
1508 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1510 if (!wr_ctx->wr_curr_bio) {
1511 mutex_unlock(&wr_ctx->wr_lock);
1514 wr_ctx->wr_curr_bio->sctx = sctx;
1515 wr_ctx->wr_curr_bio->page_count = 0;
1517 sbio = wr_ctx->wr_curr_bio;
1518 if (sbio->page_count == 0) {
1521 sbio->physical = spage->physical_for_dev_replace;
1522 sbio->logical = spage->logical;
1523 sbio->dev = wr_ctx->tgtdev;
1526 bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
1528 mutex_unlock(&wr_ctx->wr_lock);
1534 bio->bi_private = sbio;
1535 bio->bi_end_io = scrub_wr_bio_end_io;
1536 bio->bi_bdev = sbio->dev->bdev;
1537 bio->bi_sector = sbio->physical >> 9;
1539 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1540 spage->physical_for_dev_replace ||
1541 sbio->logical + sbio->page_count * PAGE_SIZE !=
1543 scrub_wr_submit(sctx);
1547 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1548 if (ret != PAGE_SIZE) {
1549 if (sbio->page_count < 1) {
1552 mutex_unlock(&wr_ctx->wr_lock);
1555 scrub_wr_submit(sctx);
1559 sbio->pagev[sbio->page_count] = spage;
1560 scrub_page_get(spage);
1562 if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1563 scrub_wr_submit(sctx);
1564 mutex_unlock(&wr_ctx->wr_lock);
1569 static void scrub_wr_submit(struct scrub_ctx *sctx)
1571 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1572 struct scrub_bio *sbio;
1574 if (!wr_ctx->wr_curr_bio)
1577 sbio = wr_ctx->wr_curr_bio;
1578 wr_ctx->wr_curr_bio = NULL;
1579 WARN_ON(!sbio->bio->bi_bdev);
1580 scrub_pending_bio_inc(sctx);
1581 /* process all writes in a single worker thread. Then the block layer
1582 * orders the requests before sending them to the driver which
1583 * doubled the write performance on spinning disks when measured
1585 btrfsic_submit_bio(WRITE, sbio->bio);
1588 static void scrub_wr_bio_end_io(struct bio *bio, int err)
1590 struct scrub_bio *sbio = bio->bi_private;
1591 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1596 sbio->work.func = scrub_wr_bio_end_io_worker;
1597 btrfs_queue_worker(&fs_info->scrub_wr_completion_workers, &sbio->work);
1600 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1602 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1603 struct scrub_ctx *sctx = sbio->sctx;
1606 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1608 struct btrfs_dev_replace *dev_replace =
1609 &sbio->sctx->dev_root->fs_info->dev_replace;
1611 for (i = 0; i < sbio->page_count; i++) {
1612 struct scrub_page *spage = sbio->pagev[i];
1614 spage->io_error = 1;
1615 btrfs_dev_replace_stats_inc(&dev_replace->
1620 for (i = 0; i < sbio->page_count; i++)
1621 scrub_page_put(sbio->pagev[i]);
1625 scrub_pending_bio_dec(sctx);
1628 static int scrub_checksum(struct scrub_block *sblock)
1633 WARN_ON(sblock->page_count < 1);
1634 flags = sblock->pagev[0]->flags;
1636 if (flags & BTRFS_EXTENT_FLAG_DATA)
1637 ret = scrub_checksum_data(sblock);
1638 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1639 ret = scrub_checksum_tree_block(sblock);
1640 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1641 (void)scrub_checksum_super(sblock);
1645 scrub_handle_errored_block(sblock);
1650 static int scrub_checksum_data(struct scrub_block *sblock)
1652 struct scrub_ctx *sctx = sblock->sctx;
1653 u8 csum[BTRFS_CSUM_SIZE];
1662 BUG_ON(sblock->page_count < 1);
1663 if (!sblock->pagev[0]->have_csum)
1666 on_disk_csum = sblock->pagev[0]->csum;
1667 page = sblock->pagev[0]->page;
1668 buffer = kmap_atomic(page);
1670 len = sctx->sectorsize;
1673 u64 l = min_t(u64, len, PAGE_SIZE);
1675 crc = btrfs_csum_data(buffer, crc, l);
1676 kunmap_atomic(buffer);
1681 BUG_ON(index >= sblock->page_count);
1682 BUG_ON(!sblock->pagev[index]->page);
1683 page = sblock->pagev[index]->page;
1684 buffer = kmap_atomic(page);
1687 btrfs_csum_final(crc, csum);
1688 if (memcmp(csum, on_disk_csum, sctx->csum_size))
1694 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1696 struct scrub_ctx *sctx = sblock->sctx;
1697 struct btrfs_header *h;
1698 struct btrfs_root *root = sctx->dev_root;
1699 struct btrfs_fs_info *fs_info = root->fs_info;
1700 u8 calculated_csum[BTRFS_CSUM_SIZE];
1701 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1703 void *mapped_buffer;
1712 BUG_ON(sblock->page_count < 1);
1713 page = sblock->pagev[0]->page;
1714 mapped_buffer = kmap_atomic(page);
1715 h = (struct btrfs_header *)mapped_buffer;
1716 memcpy(on_disk_csum, h->csum, sctx->csum_size);
1719 * we don't use the getter functions here, as we
1720 * a) don't have an extent buffer and
1721 * b) the page is already kmapped
1724 if (sblock->pagev[0]->logical != le64_to_cpu(h->bytenr))
1727 if (sblock->pagev[0]->generation != le64_to_cpu(h->generation))
1730 if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1733 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1737 WARN_ON(sctx->nodesize != sctx->leafsize);
1738 len = sctx->nodesize - BTRFS_CSUM_SIZE;
1739 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1740 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1743 u64 l = min_t(u64, len, mapped_size);
1745 crc = btrfs_csum_data(p, crc, l);
1746 kunmap_atomic(mapped_buffer);
1751 BUG_ON(index >= sblock->page_count);
1752 BUG_ON(!sblock->pagev[index]->page);
1753 page = sblock->pagev[index]->page;
1754 mapped_buffer = kmap_atomic(page);
1755 mapped_size = PAGE_SIZE;
1759 btrfs_csum_final(crc, calculated_csum);
1760 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1763 return fail || crc_fail;
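/*
 * Worked example for the checksum loop above, assuming a 16 KiB node and
 * 4 KiB pages (illustrative values only): the first iteration checksums
 * PAGE_SIZE - BTRFS_CSUM_SIZE bytes of page 0, starting right after the
 * stored csum; the next three iterations kmap pages 1..3 and feed a full
 * PAGE_SIZE each into btrfs_csum_data(); btrfs_csum_final() then produces
 * the value that is compared against on_disk_csum.
 */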
1766 static int scrub_checksum_super(struct scrub_block *sblock)
1768 struct btrfs_super_block *s;
1769 struct scrub_ctx *sctx = sblock->sctx;
1770 struct btrfs_root *root = sctx->dev_root;
1771 struct btrfs_fs_info *fs_info = root->fs_info;
1772 u8 calculated_csum[BTRFS_CSUM_SIZE];
1773 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1775 void *mapped_buffer;
1784 BUG_ON(sblock->page_count < 1);
1785 page = sblock->pagev[0]->page;
1786 mapped_buffer = kmap_atomic(page);
1787 s = (struct btrfs_super_block *)mapped_buffer;
1788 memcpy(on_disk_csum, s->csum, sctx->csum_size);
1790 if (sblock->pagev[0]->logical != le64_to_cpu(s->bytenr))
1793 if (sblock->pagev[0]->generation != le64_to_cpu(s->generation))
1796 if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1799 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1800 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1801 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1804 u64 l = min_t(u64, len, mapped_size);
1806 crc = btrfs_csum_data(p, crc, l);
1807 kunmap_atomic(mapped_buffer);
1812 BUG_ON(index >= sblock->page_count);
1813 BUG_ON(!sblock->pagev[index]->page);
1814 page = sblock->pagev[index]->page;
1815 mapped_buffer = kmap_atomic(page);
1816 mapped_size = PAGE_SIZE;
1820 btrfs_csum_final(crc, calculated_csum);
1821 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1824 if (fail_cor + fail_gen) {
1826 * if we find an error in a super block, we just report it;
1827 * super blocks get written again with the next transaction commit
1830 spin_lock(&sctx->stat_lock);
1831 ++sctx->stat.super_errors;
1832 spin_unlock(&sctx->stat_lock);
1834 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1835 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1837 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1838 BTRFS_DEV_STAT_GENERATION_ERRS);
1841 return fail_cor + fail_gen;
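/*
 * Reference counting sketch for scrub_block, derived from the call sites in
 * this file: scrub_pages() creates a block with ref_count == 1, takes one
 * additional reference for each page that is queued into a read bio, and
 * drops its own reference at the end. The bio completion worker drops one
 * reference per page, so whichever side drops the last reference frees the
 * block and its pages via scrub_page_put().
 */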
1844 static void scrub_block_get(struct scrub_block *sblock)
1846 atomic_inc(&sblock->ref_count);
1849 static void scrub_block_put(struct scrub_block *sblock)
1851 if (atomic_dec_and_test(&sblock->ref_count)) {
1854 for (i = 0; i < sblock->page_count; i++)
1855 scrub_page_put(sblock->pagev[i]);
1860 static void scrub_page_get(struct scrub_page *spage)
1862 atomic_inc(&spage->ref_count);
1865 static void scrub_page_put(struct scrub_page *spage)
1867 if (atomic_dec_and_test(&spage->ref_count)) {
1869 __free_page(spage->page);
1874 static void scrub_submit(struct scrub_ctx *sctx)
1876 struct scrub_bio *sbio;
1878 if (sctx->curr == -1)
1881 sbio = sctx->bios[sctx->curr];
1883 scrub_pending_bio_inc(sctx);
1885 if (!sbio->bio->bi_bdev) {
1887 * this case should not happen. If btrfs_map_block() is
1888 * wrong, it could happen for dev-replace operations on
1889 * missing devices when no mirrors are available, but in
1890 * this case it should already fail the mount.
1891 * This case is handled correctly (but _very_ slowly).
1893 printk_ratelimited(KERN_WARNING
1894 "btrfs: scrub_submit(bio bdev == NULL) is unexpected!\n");
1895 bio_endio(sbio->bio, -EIO);
1897 btrfsic_submit_bio(READ, sbio->bio);
1901 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
1902 struct scrub_page *spage)
1904 struct scrub_block *sblock = spage->sblock;
1905 struct scrub_bio *sbio;
1910 * grab a fresh bio or wait for one to become available
1912 while (sctx->curr == -1) {
1913 spin_lock(&sctx->list_lock);
1914 sctx->curr = sctx->first_free;
1915 if (sctx->curr != -1) {
1916 sctx->first_free = sctx->bios[sctx->curr]->next_free;
1917 sctx->bios[sctx->curr]->next_free = -1;
1918 sctx->bios[sctx->curr]->page_count = 0;
1919 spin_unlock(&sctx->list_lock);
1921 spin_unlock(&sctx->list_lock);
1922 wait_event(sctx->list_wait, sctx->first_free != -1);
1925 sbio = sctx->bios[sctx->curr];
1926 if (sbio->page_count == 0) {
1929 sbio->physical = spage->physical;
1930 sbio->logical = spage->logical;
1931 sbio->dev = spage->dev;
1934 bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
1940 bio->bi_private = sbio;
1941 bio->bi_end_io = scrub_bio_end_io;
1942 bio->bi_bdev = sbio->dev->bdev;
1943 bio->bi_sector = sbio->physical >> 9;
1945 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1947 sbio->logical + sbio->page_count * PAGE_SIZE !=
1949 sbio->dev != spage->dev) {
1954 sbio->pagev[sbio->page_count] = spage;
1955 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1956 if (ret != PAGE_SIZE) {
1957 if (sbio->page_count < 1) {
1966 scrub_block_get(sblock); /* one for the page added to the bio */
1967 atomic_inc(&sblock->outstanding_pages);
1969 if (sbio->page_count == sctx->pages_per_rd_bio)
1975 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
1976 u64 physical, struct btrfs_device *dev, u64 flags,
1977 u64 gen, int mirror_num, u8 *csum, int force,
1978 u64 physical_for_dev_replace)
1980 struct scrub_block *sblock;
1983 sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
1985 spin_lock(&sctx->stat_lock);
1986 sctx->stat.malloc_errors++;
1987 spin_unlock(&sctx->stat_lock);
1991 /* one ref inside this function, plus one for each page added to
1993 atomic_set(&sblock->ref_count, 1);
1994 sblock->sctx = sctx;
1995 sblock->no_io_error_seen = 1;
1997 for (index = 0; len > 0; index++) {
1998 struct scrub_page *spage;
1999 u64 l = min_t(u64, len, PAGE_SIZE);
2001 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2004 spin_lock(&sctx->stat_lock);
2005 sctx->stat.malloc_errors++;
2006 spin_unlock(&sctx->stat_lock);
2007 scrub_block_put(sblock);
2010 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2011 scrub_page_get(spage);
2012 sblock->pagev[index] = spage;
2013 spage->sblock = sblock;
2015 spage->flags = flags;
2016 spage->generation = gen;
2017 spage->logical = logical;
2018 spage->physical = physical;
2019 spage->physical_for_dev_replace = physical_for_dev_replace;
2020 spage->mirror_num = mirror_num;
2022 spage->have_csum = 1;
2023 memcpy(spage->csum, csum, sctx->csum_size);
2025 spage->have_csum = 0;
2027 sblock->page_count++;
2028 spage->page = alloc_page(GFP_NOFS);
2034 physical_for_dev_replace += l;
2037 WARN_ON(sblock->page_count == 0);
2038 for (index = 0; index < sblock->page_count; index++) {
2039 struct scrub_page *spage = sblock->pagev[index];
2042 ret = scrub_add_page_to_rd_bio(sctx, spage);
2044 scrub_block_put(sblock);
2052 /* last one frees, either here or in bio completion for last page */
2053 scrub_block_put(sblock);
2057 static void scrub_bio_end_io(struct bio *bio, int err)
2059 struct scrub_bio *sbio = bio->bi_private;
2060 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
2065 btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
2068 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2070 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2071 struct scrub_ctx *sctx = sbio->sctx;
2074 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2076 for (i = 0; i < sbio->page_count; i++) {
2077 struct scrub_page *spage = sbio->pagev[i];
2079 spage->io_error = 1;
2080 spage->sblock->no_io_error_seen = 0;
2084 /* now complete the scrub_block items that have all pages completed */
2085 for (i = 0; i < sbio->page_count; i++) {
2086 struct scrub_page *spage = sbio->pagev[i];
2087 struct scrub_block *sblock = spage->sblock;
2089 if (atomic_dec_and_test(&sblock->outstanding_pages))
2090 scrub_block_complete(sblock);
2091 scrub_block_put(sblock);
2096 spin_lock(&sctx->list_lock);
2097 sbio->next_free = sctx->first_free;
2098 sctx->first_free = sbio->index;
2099 spin_unlock(&sctx->list_lock);
2101 if (sctx->is_dev_replace &&
2102 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2103 mutex_lock(&sctx->wr_ctx.wr_lock);
2104 scrub_wr_submit(sctx);
2105 mutex_unlock(&sctx->wr_ctx.wr_lock);
2108 scrub_pending_bio_dec(sctx);
2111 static void scrub_block_complete(struct scrub_block *sblock)
2113 if (!sblock->no_io_error_seen) {
2114 scrub_handle_errored_block(sblock);
2117 * if there is a checksum error, write via the repair mechanism in
2118 * the dev replace case, otherwise write here in the dev replace
2121 if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace)
2122 scrub_write_block_to_dev_replace(sblock);
2126 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
2129 struct btrfs_ordered_sum *sum = NULL;
2132 unsigned long num_sectors;
2134 while (!list_empty(&sctx->csum_list)) {
2135 sum = list_first_entry(&sctx->csum_list,
2136 struct btrfs_ordered_sum, list);
2137 if (sum->bytenr > logical)
2139 if (sum->bytenr + sum->len > logical)
2142 ++sctx->stat.csum_discards;
2143 list_del(&sum->list);
2150 num_sectors = sum->len / sctx->sectorsize;
2151 for (i = 0; i < num_sectors; ++i) {
2152 if (sum->sums[i].bytenr == logical) {
2153 memcpy(csum, &sum->sums[i].sum, sctx->csum_size);
2158 if (ret && i == num_sectors - 1) {
2159 list_del(&sum->list);
2165 /* scrub extent tries to collect up to 64 kB for each bio */
2166 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2167 u64 physical, struct btrfs_device *dev, u64 flags,
2168 u64 gen, int mirror_num, u64 physical_for_dev_replace)
2171 u8 csum[BTRFS_CSUM_SIZE];
2174 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2175 blocksize = sctx->sectorsize;
2176 spin_lock(&sctx->stat_lock);
2177 sctx->stat.data_extents_scrubbed++;
2178 sctx->stat.data_bytes_scrubbed += len;
2179 spin_unlock(&sctx->stat_lock);
2180 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2181 WARN_ON(sctx->nodesize != sctx->leafsize);
2182 blocksize = sctx->nodesize;
2183 spin_lock(&sctx->stat_lock);
2184 sctx->stat.tree_extents_scrubbed++;
2185 sctx->stat.tree_bytes_scrubbed += len;
2186 spin_unlock(&sctx->stat_lock);
2188 blocksize = sctx->sectorsize;
2193 u64 l = min_t(u64, len, blocksize);
2196 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2197 /* push csums to sbio */
2198 have_csum = scrub_find_csum(sctx, logical, l, csum);
2200 ++sctx->stat.no_csum;
2201 if (sctx->is_dev_replace && !have_csum) {
2202 ret = copy_nocow_pages(sctx, logical, l,
2204 physical_for_dev_replace);
2205 goto behind_scrub_pages;
2208 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2209 mirror_num, have_csum ? csum : NULL, 0,
2210 physical_for_dev_replace);
2217 physical_for_dev_replace += l;
2222 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2223 struct map_lookup *map,
2224 struct btrfs_device *scrub_dev,
2225 int num, u64 base, u64 length,
2228 struct btrfs_path *path;
2229 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2230 struct btrfs_root *root = fs_info->extent_root;
2231 struct btrfs_root *csum_root = fs_info->csum_root;
2232 struct btrfs_extent_item *extent;
2233 struct blk_plug plug;
2238 struct extent_buffer *l;
2239 struct btrfs_key key;
2245 struct reada_control *reada1;
2246 struct reada_control *reada2;
2247 struct btrfs_key key_start;
2248 struct btrfs_key key_end;
2249 u64 increment = map->stripe_len;
2252 u64 extent_physical;
2254 struct btrfs_device *extent_dev;
2255 int extent_mirror_num;
2258 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2259 BTRFS_BLOCK_GROUP_RAID6)) {
2260 if (num >= nr_data_stripes(map)) {
2267 do_div(nstripes, map->stripe_len);
2268 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
2269 offset = map->stripe_len * num;
2270 increment = map->stripe_len * map->num_stripes;
2272 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2273 int factor = map->num_stripes / map->sub_stripes;
2274 offset = map->stripe_len * (num / map->sub_stripes);
2275 increment = map->stripe_len * factor;
2276 mirror_num = num % map->sub_stripes + 1;
2277 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
2278 increment = map->stripe_len;
2279 mirror_num = num % map->num_stripes + 1;
2280 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2281 increment = map->stripe_len;
2282 mirror_num = num % map->num_stripes + 1;
2284 increment = map->stripe_len;
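/*
 * For illustration (numbers not from the original source): with a 64K
 * stripe_len, a four-disk RAID0 chunk scrubbed as stripe num=2 starts at
 * offset = 64K * 2 = 128K into the chunk and advances in
 * increment = 64K * 4 = 256K steps; for RAID10 with four disks and two
 * sub_stripes, num=3 gives offset = 64K * (3 / 2) = 64K,
 * increment = 64K * (4 / 2) = 128K and mirror_num = 3 % 2 + 1 = 2.
 */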
2288 path = btrfs_alloc_path();
2293 * work on the commit root. The related disk blocks are static as
2294 * long as COW is applied. This means it is safe to rewrite
2295 * them to repair disk errors without any race conditions.
2297 path->search_commit_root = 1;
2298 path->skip_locking = 1;
2301 * trigger readahead for the extent tree and the csum tree and wait
2302 * for completion. During readahead, the scrub is officially paused
2303 * so that it does not hold off transaction commits.
2305 logical = base + offset;
2307 wait_event(sctx->list_wait,
2308 atomic_read(&sctx->bios_in_flight) == 0);
2309 atomic_inc(&fs_info->scrubs_paused);
2310 wake_up(&fs_info->scrub_pause_wait);
2312 /* FIXME it might be better to start readahead at commit root */
2313 key_start.objectid = logical;
2314 key_start.type = BTRFS_EXTENT_ITEM_KEY;
2315 key_start.offset = (u64)0;
2316 key_end.objectid = base + offset + nstripes * increment;
2317 key_end.type = BTRFS_METADATA_ITEM_KEY;
2318 key_end.offset = (u64)-1;
2319 reada1 = btrfs_reada_add(root, &key_start, &key_end);
2321 key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
2322 key_start.type = BTRFS_EXTENT_CSUM_KEY;
2323 key_start.offset = logical;
2324 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
2325 key_end.type = BTRFS_EXTENT_CSUM_KEY;
2326 key_end.offset = base + offset + nstripes * increment;
2327 reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
2329 if (!IS_ERR(reada1))
2330 btrfs_reada_wait(reada1);
2331 if (!IS_ERR(reada2))
2332 btrfs_reada_wait(reada2);
2334 mutex_lock(&fs_info->scrub_lock);
2335 while (atomic_read(&fs_info->scrub_pause_req)) {
2336 mutex_unlock(&fs_info->scrub_lock);
2337 wait_event(fs_info->scrub_pause_wait,
2338 atomic_read(&fs_info->scrub_pause_req) == 0);
2339 mutex_lock(&fs_info->scrub_lock);
2341 atomic_dec(&fs_info->scrubs_paused);
2342 mutex_unlock(&fs_info->scrub_lock);
2343 wake_up(&fs_info->scrub_pause_wait);
2346 * collect all data csums for the stripe to avoid seeking during
2347 * the scrub. With crc32 this currently ends up being about 1MB.
2349 blk_start_plug(&plug);
2352 * now find all extents for each stripe and scrub them
2354 logical = base + offset;
2355 physical = map->stripes[num].physical;
2356 logic_end = logical + increment * nstripes;
2358 while (logical < logic_end) {
2362 if (atomic_read(&fs_info->scrub_cancel_req) ||
2363 atomic_read(&sctx->cancel_req)) {
2368 * check to see if we have to pause
2370 if (atomic_read(&fs_info->scrub_pause_req)) {
2371 /* push queued extents */
2372 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
2374 mutex_lock(&sctx->wr_ctx.wr_lock);
2375 scrub_wr_submit(sctx);
2376 mutex_unlock(&sctx->wr_ctx.wr_lock);
2377 wait_event(sctx->list_wait,
2378 atomic_read(&sctx->bios_in_flight) == 0);
2379 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
2380 atomic_inc(&fs_info->scrubs_paused);
2381 wake_up(&fs_info->scrub_pause_wait);
2382 mutex_lock(&fs_info->scrub_lock);
2383 while (atomic_read(&fs_info->scrub_pause_req)) {
2384 mutex_unlock(&fs_info->scrub_lock);
2385 wait_event(fs_info->scrub_pause_wait,
2386 atomic_read(&fs_info->scrub_pause_req) == 0);
2387 mutex_lock(&fs_info->scrub_lock);
2389 atomic_dec(&fs_info->scrubs_paused);
2390 mutex_unlock(&fs_info->scrub_lock);
2391 wake_up(&fs_info->scrub_pause_wait);
2394 key.objectid = logical;
2395 key.type = BTRFS_EXTENT_ITEM_KEY;
2396 key.offset = (u64)-1;
2398 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2403 ret = btrfs_previous_item(root, path, 0,
2404 BTRFS_EXTENT_ITEM_KEY);
2408 /* there's no smaller item, so stick with the larger one */
2410 btrfs_release_path(path);
2411 ret = btrfs_search_slot(NULL, root, &key,
2423 slot = path->slots[0];
2424 if (slot >= btrfs_header_nritems(l)) {
2425 ret = btrfs_next_leaf(root, path);
2434 btrfs_item_key_to_cpu(l, &key, slot);
2436 if (key.type == BTRFS_METADATA_ITEM_KEY)
2437 bytes = root->leafsize;
2441 if (key.objectid + bytes <= logical)
2444 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2445 key.type != BTRFS_METADATA_ITEM_KEY)
2448 if (key.objectid >= logical + map->stripe_len) {
2449 /* out of this device extent */
2450 if (key.objectid >= logic_end)
2455 extent = btrfs_item_ptr(l, slot,
2456 struct btrfs_extent_item);
2457 flags = btrfs_extent_flags(l, extent);
2458 generation = btrfs_extent_generation(l, extent);
2460 if (key.objectid < logical &&
2461 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
2463 "btrfs scrub: tree block %llu spanning "
2464 "stripes, ignored. logical=%llu\n",
2465 (unsigned long long)key.objectid,
2466 (unsigned long long)logical);
2471 extent_logical = key.objectid;
2475 * trim extent to this stripe
2477 if (extent_logical < logical) {
2478 extent_len -= logical - extent_logical;
2479 extent_logical = logical;
2481 if (extent_logical + extent_len >
2482 logical + map->stripe_len) {
2483 extent_len = logical + map->stripe_len -
2487 extent_physical = extent_logical - logical + physical;
2488 extent_dev = scrub_dev;
2489 extent_mirror_num = mirror_num;
2491 scrub_remap_extent(fs_info, extent_logical,
2492 extent_len, &extent_physical,
2494 &extent_mirror_num);
2496 ret = btrfs_lookup_csums_range(csum_root, logical,
2497 logical + map->stripe_len - 1,
2498 &sctx->csum_list, 1);
2502 ret = scrub_extent(sctx, extent_logical, extent_len,
2503 extent_physical, extent_dev, flags,
2504 generation, extent_mirror_num,
2505 extent_logical - logical + physical);
2509 if (extent_logical + extent_len <
2510 key.objectid + bytes) {
2511 logical += increment;
2512 physical += map->stripe_len;
2514 if (logical < key.objectid + bytes) {
2519 if (logical >= logic_end) {
2527 btrfs_release_path(path);
2528 logical += increment;
2529 physical += map->stripe_len;
2530 spin_lock(&sctx->stat_lock);
2532 sctx->stat.last_physical = map->stripes[num].physical +
2535 sctx->stat.last_physical = physical;
2536 spin_unlock(&sctx->stat_lock);
2541 /* push queued extents */
2543 mutex_lock(&sctx->wr_ctx.wr_lock);
2544 scrub_wr_submit(sctx);
2545 mutex_unlock(&sctx->wr_ctx.wr_lock);
2547 blk_finish_plug(&plug);
2548 btrfs_free_path(path);
2549 return ret < 0 ? ret : 0;
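/*
 * scrub_chunk() looks up the chunk mapping for @chunk_offset and scrubs the
 * stripe of that chunk which lives on @scrub_dev at @dev_offset.
 */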
2552 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2553 struct btrfs_device *scrub_dev,
2554 u64 chunk_tree, u64 chunk_objectid,
2555 u64 chunk_offset, u64 length,
2556 u64 dev_offset, int is_dev_replace)
2558 struct btrfs_mapping_tree *map_tree =
2559 &sctx->dev_root->fs_info->mapping_tree;
2560 struct map_lookup *map;
2561 struct extent_map *em;
2565 read_lock(&map_tree->map_tree.lock);
2566 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2567 read_unlock(&map_tree->map_tree.lock);
2572 map = (struct map_lookup *)em->bdev;
2573 if (em->start != chunk_offset)
2576 if (em->len < length)
2579 for (i = 0; i < map->num_stripes; ++i) {
2580 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2581 map->stripes[i].physical == dev_offset) {
2582 ret = scrub_stripe(sctx, map, scrub_dev, i,
2583 chunk_offset, length,
2590 free_extent_map(em);
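/*
 * scrub_enumerate_chunks() walks the DEV_EXTENT items of @scrub_dev in the
 * device tree and scrubs every chunk that intersects [start, end). The
 * corresponding block group is pinned for the duration of the scrub and the
 * dev-replace cursor is advanced after each chunk.
 */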
2595 static noinline_for_stack
2596 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2597 struct btrfs_device *scrub_dev, u64 start, u64 end,
2600 struct btrfs_dev_extent *dev_extent = NULL;
2601 struct btrfs_path *path;
2602 struct btrfs_root *root = sctx->dev_root;
2603 struct btrfs_fs_info *fs_info = root->fs_info;
2610 struct extent_buffer *l;
2611 struct btrfs_key key;
2612 struct btrfs_key found_key;
2613 struct btrfs_block_group_cache *cache;
2614 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2616 path = btrfs_alloc_path();
2621 path->search_commit_root = 1;
2622 path->skip_locking = 1;
2624 key.objectid = scrub_dev->devid;
2626 key.type = BTRFS_DEV_EXTENT_KEY;
2629 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2633 if (path->slots[0] >=
2634 btrfs_header_nritems(path->nodes[0])) {
2635 ret = btrfs_next_leaf(root, path);
2642 slot = path->slots[0];
2644 btrfs_item_key_to_cpu(l, &found_key, slot);
2646 if (found_key.objectid != scrub_dev->devid)
2649 if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
2652 if (found_key.offset >= end)
2655 if (found_key.offset < key.offset)
2658 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2659 length = btrfs_dev_extent_length(l, dev_extent);
2661 if (found_key.offset + length <= start) {
2662 key.offset = found_key.offset + length;
2663 btrfs_release_path(path);
2667 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2668 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2669 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2672 * get a reference on the corresponding block group to prevent
2673 * the chunk from going away while we scrub it
2675 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2680 dev_replace->cursor_right = found_key.offset + length;
2681 dev_replace->cursor_left = found_key.offset;
2682 dev_replace->item_needs_writeback = 1;
2683 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
2684 chunk_offset, length, found_key.offset,
2688 * flush and submit all pending read and write bios, then wait for
2690 * them. Note that in the dev replace case, a read request causes
2691 * write requests that are submitted in the read completion
2692 * worker. Therefore, in the current situation, it is required
2693 * that all write requests are flushed, so that all read and
2694 * write requests are really completed when bios_in_flight reaches zero.
2697 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
2699 mutex_lock(&sctx->wr_ctx.wr_lock);
2700 scrub_wr_submit(sctx);
2701 mutex_unlock(&sctx->wr_ctx.wr_lock);
2703 wait_event(sctx->list_wait,
2704 atomic_read(&sctx->bios_in_flight) == 0);
2705 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
2706 atomic_inc(&fs_info->scrubs_paused);
2707 wake_up(&fs_info->scrub_pause_wait);
2708 wait_event(sctx->list_wait,
2709 atomic_read(&sctx->workers_pending) == 0);
2711 mutex_lock(&fs_info->scrub_lock);
2712 while (atomic_read(&fs_info->scrub_pause_req)) {
2713 mutex_unlock(&fs_info->scrub_lock);
2714 wait_event(fs_info->scrub_pause_wait,
2715 atomic_read(&fs_info->scrub_pause_req) == 0);
2716 mutex_lock(&fs_info->scrub_lock);
2718 atomic_dec(&fs_info->scrubs_paused);
2719 mutex_unlock(&fs_info->scrub_lock);
2720 wake_up(&fs_info->scrub_pause_wait);
2722 dev_replace->cursor_left = dev_replace->cursor_right;
2723 dev_replace->item_needs_writeback = 1;
2724 btrfs_put_block_group(cache);
2727 if (is_dev_replace &&
2728 atomic64_read(&dev_replace->num_write_errors) > 0) {
2732 if (sctx->stat.malloc_errors > 0) {
2737 key.offset = found_key.offset + length;
2738 btrfs_release_path(path);
2741 btrfs_free_path(path);
2744 * ret can still be 1 from search_slot or next_leaf,
2745 * that's not an error
2747 return ret < 0 ? ret : 0;
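/*
 * scrub_supers() reads and verifies every super block mirror that fits on the
 * device; the mirror copies conventionally sit at 64 KiB, 64 MiB and 256 GiB
 * (see btrfs_sb_offset()), and the last committed generation is used as the
 * expected one.
 */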
2750 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2751 struct btrfs_device *scrub_dev)
2757 struct btrfs_root *root = sctx->dev_root;
2759 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
2762 gen = root->fs_info->last_trans_committed;
2764 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2765 bytenr = btrfs_sb_offset(i);
2766 if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
2769 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
2770 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
2775 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
2781 * get a reference count on fs_info->scrub_workers; start the workers if necessary
2783 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
2788 mutex_lock(&fs_info->scrub_lock);
2789 if (fs_info->scrub_workers_refcnt == 0) {
2791 btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1,
2792 &fs_info->generic_worker);
2794 btrfs_init_workers(&fs_info->scrub_workers, "scrub",
2795 fs_info->thread_pool_size,
2796 &fs_info->generic_worker);
2797 fs_info->scrub_workers.idle_thresh = 4;
2798 ret = btrfs_start_workers(&fs_info->scrub_workers);
2801 btrfs_init_workers(&fs_info->scrub_wr_completion_workers,
2803 fs_info->thread_pool_size,
2804 &fs_info->generic_worker);
2805 fs_info->scrub_wr_completion_workers.idle_thresh = 2;
2806 ret = btrfs_start_workers(
2807 &fs_info->scrub_wr_completion_workers);
2810 btrfs_init_workers(&fs_info->scrub_nocow_workers, "scrubnc", 1,
2811 &fs_info->generic_worker);
2812 ret = btrfs_start_workers(&fs_info->scrub_nocow_workers);
2816 ++fs_info->scrub_workers_refcnt;
2818 mutex_unlock(&fs_info->scrub_lock);
2823 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
2825 mutex_lock(&fs_info->scrub_lock);
2826 if (--fs_info->scrub_workers_refcnt == 0) {
2827 btrfs_stop_workers(&fs_info->scrub_workers);
2828 btrfs_stop_workers(&fs_info->scrub_wr_completion_workers);
2829 btrfs_stop_workers(&fs_info->scrub_nocow_workers);
2831 WARN_ON(fs_info->scrub_workers_refcnt < 0);
2832 mutex_unlock(&fs_info->scrub_lock);
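/*
 * btrfs_scrub_dev() is the entry point for both scrub and dev-replace: it
 * checks the nodesize/sectorsize assumptions, takes a reference on the scrub
 * workers, refuses to run if the device is missing, is a replace target or is
 * already being scrubbed, then scrubs the super blocks (scrub only) and all
 * chunks, reports the final progress and tears the context down again.
 */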
2835 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2836 u64 end, struct btrfs_scrub_progress *progress,
2837 int readonly, int is_dev_replace)
2839 struct scrub_ctx *sctx;
2841 struct btrfs_device *dev;
2843 if (btrfs_fs_closing(fs_info))
2847 * check some assumptions
2849 if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
2851 "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
2852 fs_info->chunk_root->nodesize,
2853 fs_info->chunk_root->leafsize);
2857 if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
2859 * in this case scrub is unable to calculate the checksum,
2860 * given the way scrub is implemented. Do not handle this
2861 * situation at all because it won't ever happen.
2864 "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
2865 fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
2869 if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
2870 /* not supported for data w/o checksums */
2872 "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lld) fails\n",
2873 fs_info->chunk_root->sectorsize,
2874 (unsigned long long)PAGE_SIZE);
2878 if (fs_info->chunk_root->nodesize >
2879 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
2880 fs_info->chunk_root->sectorsize >
2881 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
2883 * would exhaust the array bounds of pagev member in
2884 * struct scrub_block
2886 pr_err("btrfs_scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails\n",
2887 fs_info->chunk_root->nodesize,
2888 SCRUB_MAX_PAGES_PER_BLOCK,
2889 fs_info->chunk_root->sectorsize,
2890 SCRUB_MAX_PAGES_PER_BLOCK);
2894 ret = scrub_workers_get(fs_info, is_dev_replace);
2898 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2899 dev = btrfs_find_device(fs_info, devid, NULL, NULL);
2900 if (!dev || (dev->missing && !is_dev_replace)) {
2901 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2902 scrub_workers_put(fs_info);
2905 mutex_lock(&fs_info->scrub_lock);
2907 if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
2908 mutex_unlock(&fs_info->scrub_lock);
2909 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2910 scrub_workers_put(fs_info);
2914 btrfs_dev_replace_lock(&fs_info->dev_replace);
2915 if (dev->scrub_device ||
2917 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2918 btrfs_dev_replace_unlock(&fs_info->dev_replace);
2919 mutex_unlock(&fs_info->scrub_lock);
2920 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2921 scrub_workers_put(fs_info);
2922 return -EINPROGRESS;
2924 btrfs_dev_replace_unlock(&fs_info->dev_replace);
2925 sctx = scrub_setup_ctx(dev, is_dev_replace);
2927 mutex_unlock(&fs_info->scrub_lock);
2928 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2929 scrub_workers_put(fs_info);
2930 return PTR_ERR(sctx);
2932 sctx->readonly = readonly;
2933 dev->scrub_device = sctx;
2935 atomic_inc(&fs_info->scrubs_running);
2936 mutex_unlock(&fs_info->scrub_lock);
2937 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2939 if (!is_dev_replace) {
2940 down_read(&fs_info->scrub_super_lock);
2941 ret = scrub_supers(sctx, dev);
2942 up_read(&fs_info->scrub_super_lock);
2946 ret = scrub_enumerate_chunks(sctx, dev, start, end,
2949 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
2950 atomic_dec(&fs_info->scrubs_running);
2951 wake_up(&fs_info->scrub_pause_wait);
2953 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
2956 memcpy(progress, &sctx->stat, sizeof(*progress));
2958 mutex_lock(&fs_info->scrub_lock);
2959 dev->scrub_device = NULL;
2960 mutex_unlock(&fs_info->scrub_lock);
2962 scrub_free_ctx(sctx);
2963 scrub_workers_put(fs_info);
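/*
 * btrfs_scrub_pause() raises scrub_pause_req and waits until every running
 * scrub has reported itself as paused; btrfs_scrub_continue() drops the
 * request again and wakes the waiters.
 */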
2968 void btrfs_scrub_pause(struct btrfs_root *root)
2970 struct btrfs_fs_info *fs_info = root->fs_info;
2972 mutex_lock(&fs_info->scrub_lock);
2973 atomic_inc(&fs_info->scrub_pause_req);
2974 while (atomic_read(&fs_info->scrubs_paused) !=
2975 atomic_read(&fs_info->scrubs_running)) {
2976 mutex_unlock(&fs_info->scrub_lock);
2977 wait_event(fs_info->scrub_pause_wait,
2978 atomic_read(&fs_info->scrubs_paused) ==
2979 atomic_read(&fs_info->scrubs_running));
2980 mutex_lock(&fs_info->scrub_lock);
2982 mutex_unlock(&fs_info->scrub_lock);
2985 void btrfs_scrub_continue(struct btrfs_root *root)
2987 struct btrfs_fs_info *fs_info = root->fs_info;
2989 atomic_dec(&fs_info->scrub_pause_req);
2990 wake_up(&fs_info->scrub_pause_wait);
2993 void btrfs_scrub_pause_super(struct btrfs_root *root)
2995 down_write(&root->fs_info->scrub_super_lock);
2998 void btrfs_scrub_continue_super(struct btrfs_root *root)
3000 up_write(&root->fs_info->scrub_super_lock);
3003 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3005 mutex_lock(&fs_info->scrub_lock);
3006 if (!atomic_read(&fs_info->scrubs_running)) {
3007 mutex_unlock(&fs_info->scrub_lock);
3011 atomic_inc(&fs_info->scrub_cancel_req);
3012 while (atomic_read(&fs_info->scrubs_running)) {
3013 mutex_unlock(&fs_info->scrub_lock);
3014 wait_event(fs_info->scrub_pause_wait,
3015 atomic_read(&fs_info->scrubs_running) == 0);
3016 mutex_lock(&fs_info->scrub_lock);
3018 atomic_dec(&fs_info->scrub_cancel_req);
3019 mutex_unlock(&fs_info->scrub_lock);
3024 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3025 struct btrfs_device *dev)
3027 struct scrub_ctx *sctx;
3029 mutex_lock(&fs_info->scrub_lock);
3030 sctx = dev->scrub_device;
3032 mutex_unlock(&fs_info->scrub_lock);
3035 atomic_inc(&sctx->cancel_req);
3036 while (dev->scrub_device) {
3037 mutex_unlock(&fs_info->scrub_lock);
3038 wait_event(fs_info->scrub_pause_wait,
3039 dev->scrub_device == NULL);
3040 mutex_lock(&fs_info->scrub_lock);
3042 mutex_unlock(&fs_info->scrub_lock);
3047 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
3048 struct btrfs_scrub_progress *progress)
3050 struct btrfs_device *dev;
3051 struct scrub_ctx *sctx = NULL;
3053 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3054 dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
3056 sctx = dev->scrub_device;
3058 memcpy(progress, &sctx->stat, sizeof(*progress));
3059 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3061 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
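/*
 * In the dev-replace case, scrub_remap_extent() maps the extent again with
 * btrfs_map_block() and reads it from the first stripe of the resulting
 * mapping instead of the physical location derived from the stripe that is
 * currently being scrubbed.
 */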
3064 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
3065 u64 extent_logical, u64 extent_len,
3066 u64 *extent_physical,
3067 struct btrfs_device **extent_dev,
3068 int *extent_mirror_num)
3071 struct btrfs_bio *bbio = NULL;
3074 mapped_length = extent_len;
3075 ret = btrfs_map_block(fs_info, READ, extent_logical,
3076 &mapped_length, &bbio, 0);
3077 if (ret || !bbio || mapped_length < extent_len ||
3078 !bbio->stripes[0].dev->bdev) {
3083 *extent_physical = bbio->stripes[0].physical;
3084 *extent_mirror_num = bbio->mirror_num;
3085 *extent_dev = bbio->stripes[0].dev;
3089 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
3090 struct scrub_wr_ctx *wr_ctx,
3091 struct btrfs_fs_info *fs_info,
3092 struct btrfs_device *dev,
3095 WARN_ON(wr_ctx->wr_curr_bio != NULL);
3097 mutex_init(&wr_ctx->wr_lock);
3098 wr_ctx->wr_curr_bio = NULL;
3099 if (!is_dev_replace)
3102 WARN_ON(!dev->bdev);
3103 wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
3104 bio_get_nr_vecs(dev->bdev));
3105 wr_ctx->tgtdev = dev;
3106 atomic_set(&wr_ctx->flush_all_writes, 0);
3110 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
3112 mutex_lock(&wr_ctx->wr_lock);
3113 kfree(wr_ctx->wr_curr_bio);
3114 wr_ctx->wr_curr_bio = NULL;
3115 mutex_unlock(&wr_ctx->wr_lock);
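/*
 * For nodatasum data during dev-replace there is no checksum to verify, so
 * instead of scrubbing the extent it is copied through the page cache: a
 * worker iterates over all inodes that reference the logical address and
 * writes the pages to the replace target with write_page_nocow().
 */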
3118 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
3119 int mirror_num, u64 physical_for_dev_replace)
3121 struct scrub_copy_nocow_ctx *nocow_ctx;
3122 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
3124 nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
3126 spin_lock(&sctx->stat_lock);
3127 sctx->stat.malloc_errors++;
3128 spin_unlock(&sctx->stat_lock);
3132 scrub_pending_trans_workers_inc(sctx);
3134 nocow_ctx->sctx = sctx;
3135 nocow_ctx->logical = logical;
3136 nocow_ctx->len = len;
3137 nocow_ctx->mirror_num = mirror_num;
3138 nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
3139 nocow_ctx->work.func = copy_nocow_pages_worker;
3140 btrfs_queue_worker(&fs_info->scrub_nocow_workers,
3146 static void copy_nocow_pages_worker(struct btrfs_work *work)
3148 struct scrub_copy_nocow_ctx *nocow_ctx =
3149 container_of(work, struct scrub_copy_nocow_ctx, work);
3150 struct scrub_ctx *sctx = nocow_ctx->sctx;
3151 u64 logical = nocow_ctx->logical;
3152 u64 len = nocow_ctx->len;
3153 int mirror_num = nocow_ctx->mirror_num;
3154 u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3156 struct btrfs_trans_handle *trans = NULL;
3157 struct btrfs_fs_info *fs_info;
3158 struct btrfs_path *path;
3159 struct btrfs_root *root;
3160 int not_written = 0;
3162 fs_info = sctx->dev_root->fs_info;
3163 root = fs_info->extent_root;
3165 path = btrfs_alloc_path();
3167 spin_lock(&sctx->stat_lock);
3168 sctx->stat.malloc_errors++;
3169 spin_unlock(&sctx->stat_lock);
3174 trans = btrfs_join_transaction(root);
3175 if (IS_ERR(trans)) {
3180 ret = iterate_inodes_from_logical(logical, fs_info, path,
3181 copy_nocow_pages_for_inode,
3183 if (ret != 0 && ret != -ENOENT) {
3184 pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %llu, ret %d\n",
3185 (unsigned long long)logical,
3186 (unsigned long long)physical_for_dev_replace,
3187 (unsigned long long)len,
3188 (unsigned long long)mirror_num, ret);
3194 if (trans && !IS_ERR(trans))
3195 btrfs_end_transaction(trans, root);
3197 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
3198 num_uncorrectable_read_errors);
3200 btrfs_free_path(path);
3203 scrub_pending_trans_workers_dec(sctx);
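/*
 * copy_nocow_pages_for_inode() is called once for every inode that references
 * the copied range: the subvolume root and the inode are looked up under
 * subvol_srcu, each page is read (pages that are already uptodate are reused
 * and dirty ones are skipped) and then written to the replace target at the
 * matching physical offset.
 */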
3206 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
3208 unsigned long index;
3209 struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
3211 struct btrfs_key key;
3212 struct inode *inode = NULL;
3213 struct btrfs_root *local_root;
3214 u64 physical_for_dev_replace;
3216 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
3219 key.objectid = root;
3220 key.type = BTRFS_ROOT_ITEM_KEY;
3221 key.offset = (u64)-1;
3223 srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
3225 local_root = btrfs_read_fs_root_no_name(fs_info, &key);
3226 if (IS_ERR(local_root)) {
3227 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
3228 return PTR_ERR(local_root);
3231 key.type = BTRFS_INODE_ITEM_KEY;
3232 key.objectid = inum;
3234 inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
3235 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
3237 return PTR_ERR(inode);
3239 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3240 len = nocow_ctx->len;
3241 while (len >= PAGE_CACHE_SIZE) {
3242 struct page *page = NULL;
3245 index = offset >> PAGE_CACHE_SHIFT;
3247 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
3249 pr_err("find_or_create_page() failed\n");
3254 if (PageUptodate(page)) {
3255 if (PageDirty(page))
3258 ClearPageError(page);
3259 ret_sub = extent_read_full_page(&BTRFS_I(inode)->
3261 page, btrfs_get_extent,
3262 nocow_ctx->mirror_num);
3267 wait_on_page_locked(page);
3268 if (!PageUptodate(page)) {
3273 ret_sub = write_page_nocow(nocow_ctx->sctx,
3274 physical_for_dev_replace, page);
3285 offset += PAGE_CACHE_SIZE;
3286 physical_for_dev_replace += PAGE_CACHE_SIZE;
3287 len -= PAGE_CACHE_SIZE;
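/*
 * write_page_nocow() synchronously writes a single page to the replace target
 * device at @physical_for_dev_replace and accounts a write error in the
 * device statistics if the bio cannot be built or does not complete
 * successfully.
 */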
3295 static int write_page_nocow(struct scrub_ctx *sctx,
3296 u64 physical_for_dev_replace, struct page *page)
3299 struct btrfs_device *dev;
3301 DECLARE_COMPLETION_ONSTACK(compl);
3303 dev = sctx->wr_ctx.tgtdev;
3307 printk_ratelimited(KERN_WARNING
3308 "btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
3311 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
3313 spin_lock(&sctx->stat_lock);
3314 sctx->stat.malloc_errors++;
3315 spin_unlock(&sctx->stat_lock);
3318 bio->bi_private = &compl;
3319 bio->bi_end_io = scrub_complete_bio_end_io;
3321 bio->bi_sector = physical_for_dev_replace >> 9;
3322 bio->bi_bdev = dev->bdev;
3323 ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
3324 if (ret != PAGE_CACHE_SIZE) {
3327 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
3330 btrfsic_submit_bio(WRITE_SYNC, bio);
3331 wait_for_completion(&compl);
3333 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
3334 goto leave_with_eio;