Btrfs: wake up @scrub_pause_wait as much as we can
fs/btrfs/scrub.c (firefly-linux-kernel-4.4.55.git)
1 /*
2  * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/blkdev.h>
20 #include <linux/ratelimit.h>
21 #include "ctree.h"
22 #include "volumes.h"
23 #include "disk-io.h"
24 #include "ordered-data.h"
25 #include "transaction.h"
26 #include "backref.h"
27 #include "extent_io.h"
28 #include "dev-replace.h"
29 #include "check-integrity.h"
30 #include "rcu-string.h"
31 #include "raid56.h"
32
33 /*
34  * This is only the first step towards a full-featured scrub. It reads all
35  * extents and super blocks and verifies the checksums. In case a bad checksum
36  * is found or the extent cannot be read, good data will be written back if
37  * any can be found.
38  *
39  * Future enhancements:
40  *  - In case an unrepairable extent is encountered, track which files are
41  *    affected and report them
42  *  - track and record media errors, throw out bad devices
43  *  - add a mode to also read unallocated space
44  */
45
46 struct scrub_block;
47 struct scrub_ctx;
48
49 /*
50  * the following three values only influence the performance.
51  * The last one configures the number of parallel and outstanding I/O
52  * operations. The first two values configure an upper limit for the number
53  * of (dynamically allocated) pages that are added to a bio.
54  */
55 #define SCRUB_PAGES_PER_RD_BIO  32      /* 128k per bio */
56 #define SCRUB_PAGES_PER_WR_BIO  32      /* 128k per bio */
57 #define SCRUB_BIOS_PER_SCTX     64      /* 8MB per device in flight */
58
59 /*
60  * the following value times PAGE_SIZE needs to be large enough to match the
61  * largest node/leaf/sector size that shall be supported.
62  * Values larger than BTRFS_STRIPE_LEN are not supported.
63  */
64 #define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */
65
66 struct scrub_page {
67         struct scrub_block      *sblock;
68         struct page             *page;
69         struct btrfs_device     *dev;
70         u64                     flags;  /* extent flags */
71         u64                     generation;
72         u64                     logical;
73         u64                     physical;
74         u64                     physical_for_dev_replace;
75         atomic_t                ref_count;
76         struct {
77                 unsigned int    mirror_num:8;
78                 unsigned int    have_csum:1;
79                 unsigned int    io_error:1;
80         };
81         u8                      csum[BTRFS_CSUM_SIZE];
82 };
83
84 struct scrub_bio {
85         int                     index;
86         struct scrub_ctx        *sctx;
87         struct btrfs_device     *dev;
88         struct bio              *bio;
89         int                     err;
90         u64                     logical;
91         u64                     physical;
92 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
93         struct scrub_page       *pagev[SCRUB_PAGES_PER_WR_BIO];
94 #else
95         struct scrub_page       *pagev[SCRUB_PAGES_PER_RD_BIO];
96 #endif
97         int                     page_count;
98         int                     next_free;
99         struct btrfs_work       work;
100 };
101
102 struct scrub_block {
103         struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
104         int                     page_count;
105         atomic_t                outstanding_pages;
106         atomic_t                ref_count; /* free mem on transition to zero */
107         struct scrub_ctx        *sctx;
108         struct {
109                 unsigned int    header_error:1;
110                 unsigned int    checksum_error:1;
111                 unsigned int    no_io_error_seen:1;
112                 unsigned int    generation_error:1; /* also sets header_error */
113         };
114 };
115
116 struct scrub_wr_ctx {
117         struct scrub_bio *wr_curr_bio;
118         struct btrfs_device *tgtdev;
119         int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
120         atomic_t flush_all_writes;
121         struct mutex wr_lock;
122 };
123
124 struct scrub_ctx {
125         struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
126         struct btrfs_root       *dev_root;
127         int                     first_free;
128         int                     curr;
129         atomic_t                bios_in_flight;
130         atomic_t                workers_pending;
131         spinlock_t              list_lock;
132         wait_queue_head_t       list_wait;
133         u16                     csum_size;
134         struct list_head        csum_list;
135         atomic_t                cancel_req;
136         int                     readonly;
137         int                     pages_per_rd_bio;
138         u32                     sectorsize;
139         u32                     nodesize;
140         u32                     leafsize;
141
142         int                     is_dev_replace;
143         struct scrub_wr_ctx     wr_ctx;
144
145         /*
146          * statistics
147          */
148         struct btrfs_scrub_progress stat;
149         spinlock_t              stat_lock;
150 };
151
152 struct scrub_fixup_nodatasum {
153         struct scrub_ctx        *sctx;
154         struct btrfs_device     *dev;
155         u64                     logical;
156         struct btrfs_root       *root;
157         struct btrfs_work       work;
158         int                     mirror_num;
159 };
160
161 struct scrub_nocow_inode {
162         u64                     inum;
163         u64                     offset;
164         u64                     root;
165         struct list_head        list;
166 };
167
168 struct scrub_copy_nocow_ctx {
169         struct scrub_ctx        *sctx;
170         u64                     logical;
171         u64                     len;
172         int                     mirror_num;
173         u64                     physical_for_dev_replace;
174         struct list_head        inodes;
175         struct btrfs_work       work;
176 };
177
178 struct scrub_warning {
179         struct btrfs_path       *path;
180         u64                     extent_item_size;
181         char                    *scratch_buf;
182         char                    *msg_buf;
183         const char              *errstr;
184         sector_t                sector;
185         u64                     logical;
186         struct btrfs_device     *dev;
187         int                     msg_bufsize;
188         int                     scratch_bufsize;
189 };
190
191
192 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
193 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
194 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
195 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
196 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
197 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
198                                      struct btrfs_fs_info *fs_info,
199                                      struct scrub_block *original_sblock,
200                                      u64 length, u64 logical,
201                                      struct scrub_block *sblocks_for_recheck);
202 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
203                                 struct scrub_block *sblock, int is_metadata,
204                                 int have_csum, u8 *csum, u64 generation,
205                                 u16 csum_size);
206 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
207                                          struct scrub_block *sblock,
208                                          int is_metadata, int have_csum,
209                                          const u8 *csum, u64 generation,
210                                          u16 csum_size);
211 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
212                                              struct scrub_block *sblock_good,
213                                              int force_write);
214 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
215                                             struct scrub_block *sblock_good,
216                                             int page_num, int force_write);
217 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
218 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
219                                            int page_num);
220 static int scrub_checksum_data(struct scrub_block *sblock);
221 static int scrub_checksum_tree_block(struct scrub_block *sblock);
222 static int scrub_checksum_super(struct scrub_block *sblock);
223 static void scrub_block_get(struct scrub_block *sblock);
224 static void scrub_block_put(struct scrub_block *sblock);
225 static void scrub_page_get(struct scrub_page *spage);
226 static void scrub_page_put(struct scrub_page *spage);
227 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
228                                     struct scrub_page *spage);
229 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
230                        u64 physical, struct btrfs_device *dev, u64 flags,
231                        u64 gen, int mirror_num, u8 *csum, int force,
232                        u64 physical_for_dev_replace);
233 static void scrub_bio_end_io(struct bio *bio, int err);
234 static void scrub_bio_end_io_worker(struct btrfs_work *work);
235 static void scrub_block_complete(struct scrub_block *sblock);
236 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
237                                u64 extent_logical, u64 extent_len,
238                                u64 *extent_physical,
239                                struct btrfs_device **extent_dev,
240                                int *extent_mirror_num);
241 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
242                               struct scrub_wr_ctx *wr_ctx,
243                               struct btrfs_fs_info *fs_info,
244                               struct btrfs_device *dev,
245                               int is_dev_replace);
246 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
247 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
248                                     struct scrub_page *spage);
249 static void scrub_wr_submit(struct scrub_ctx *sctx);
250 static void scrub_wr_bio_end_io(struct bio *bio, int err);
251 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
252 static int write_page_nocow(struct scrub_ctx *sctx,
253                             u64 physical_for_dev_replace, struct page *page);
254 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
255                                       struct scrub_copy_nocow_ctx *ctx);
256 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
257                             int mirror_num, u64 physical_for_dev_replace);
258 static void copy_nocow_pages_worker(struct btrfs_work *work);
259 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
260 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
261
262
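/*
 * bookkeeping for the bios issued on behalf of this scrub context;
 * scrub_pending_bio_dec() wakes up anyone sleeping on sctx->list_wait
 */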
263 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
264 {
265         atomic_inc(&sctx->bios_in_flight);
266 }
267
268 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
269 {
270         atomic_dec(&sctx->bios_in_flight);
271         wake_up(&sctx->list_wait);
272 }
273
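/*
 * caller must hold fs_info->scrub_lock. While a pause is requested,
 * drop the lock, sleep on scrub_pause_wait until scrub_pause_req
 * returns to zero and retake the lock.
 */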
274 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
275 {
276         while (atomic_read(&fs_info->scrub_pause_req)) {
277                 mutex_unlock(&fs_info->scrub_lock);
278                 wait_event(fs_info->scrub_pause_wait,
279                    atomic_read(&fs_info->scrub_pause_req) == 0);
280                 mutex_lock(&fs_info->scrub_lock);
281         }
282 }
283
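/*
 * mark this scrub as paused (and notify a possible pauser waiting on
 * scrub_pause_wait) before blocking in __scrub_blocked_if_needed(),
 * then leave the paused state again and wake up the waiters
 */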
284 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
285 {
286         atomic_inc(&fs_info->scrubs_paused);
287         wake_up(&fs_info->scrub_pause_wait);
288
289         mutex_lock(&fs_info->scrub_lock);
290         __scrub_blocked_if_needed(fs_info);
291         atomic_dec(&fs_info->scrubs_paused);
292         mutex_unlock(&fs_info->scrub_lock);
293
294         wake_up(&fs_info->scrub_pause_wait);
295 }
296
297 /*
298  * used for workers that require transaction commits (i.e., for the
299  * NOCOW case)
300  */
301 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
302 {
303         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
304
305         /*
306          * increment scrubs_running to prevent cancel requests from
307          * completing as long as a worker is running. we must also
308          * increment scrubs_paused to prevent deadlocking on pause
309          * requests used for transaction commits (as the worker uses a
310          * transaction context). it is safe to regard the worker
311          * as paused for all practical matters. effectively, we only
312          * avoid cancellation requests from completing.
313          */
314         mutex_lock(&fs_info->scrub_lock);
315         atomic_inc(&fs_info->scrubs_running);
316         atomic_inc(&fs_info->scrubs_paused);
317         mutex_unlock(&fs_info->scrub_lock);
318
319         /*
320          * checking the @scrubs_running == @scrubs_paused condition
321          * inside wait_event() is not an atomic operation, which
322          * means we may inc/dec @scrubs_running/@scrubs_paused at
323          * any time. Wake up @scrub_pause_wait as much as we can
324          * so that a blocked transaction commit waits for less time.
325          */
326         wake_up(&fs_info->scrub_pause_wait);
327
328         atomic_inc(&sctx->workers_pending);
329 }
330
331 /* used for workers that require transaction commits */
332 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
333 {
334         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
335
336         /*
337          * see scrub_pending_trans_workers_inc() for why we're pretending
338          * to be paused in the scrub counters
339          */
340         mutex_lock(&fs_info->scrub_lock);
341         atomic_dec(&fs_info->scrubs_running);
342         atomic_dec(&fs_info->scrubs_paused);
343         mutex_unlock(&fs_info->scrub_lock);
344         atomic_dec(&sctx->workers_pending);
345         wake_up(&fs_info->scrub_pause_wait);
346         wake_up(&sctx->list_wait);
347 }
348
349 static void scrub_free_csums(struct scrub_ctx *sctx)
350 {
351         while (!list_empty(&sctx->csum_list)) {
352                 struct btrfs_ordered_sum *sum;
353                 sum = list_first_entry(&sctx->csum_list,
354                                        struct btrfs_ordered_sum, list);
355                 list_del(&sum->list);
356                 kfree(sum);
357         }
358 }
359
360 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
361 {
362         int i;
363
364         if (!sctx)
365                 return;
366
367         scrub_free_wr_ctx(&sctx->wr_ctx);
368
369         /* this can happen when scrub is cancelled */
370         if (sctx->curr != -1) {
371                 struct scrub_bio *sbio = sctx->bios[sctx->curr];
372
373                 for (i = 0; i < sbio->page_count; i++) {
374                         WARN_ON(!sbio->pagev[i]->page);
375                         scrub_block_put(sbio->pagev[i]->sblock);
376                 }
377                 bio_put(sbio->bio);
378         }
379
380         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
381                 struct scrub_bio *sbio = sctx->bios[i];
382
383                 if (!sbio)
384                         break;
385                 kfree(sbio);
386         }
387
388         scrub_free_csums(sctx);
389         kfree(sctx);
390 }
391
392 static noinline_for_stack
393 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
394 {
395         struct scrub_ctx *sctx;
396         int             i;
397         struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
398         int pages_per_rd_bio;
399         int ret;
400
401         /*
402          * the setting of pages_per_rd_bio is correct for scrub but might
403          * be wrong for the dev_replace code where we might read from
404          * different devices in the initial huge bios. However, that
405          * code is able to correctly handle the case when adding a page
406          * to a bio fails.
407          */
408         if (dev->bdev)
409                 pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
410                                          bio_get_nr_vecs(dev->bdev));
411         else
412                 pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
413         sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
414         if (!sctx)
415                 goto nomem;
416         sctx->is_dev_replace = is_dev_replace;
417         sctx->pages_per_rd_bio = pages_per_rd_bio;
418         sctx->curr = -1;
419         sctx->dev_root = dev->dev_root;
420         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
421                 struct scrub_bio *sbio;
422
423                 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
424                 if (!sbio)
425                         goto nomem;
426                 sctx->bios[i] = sbio;
427
428                 sbio->index = i;
429                 sbio->sctx = sctx;
430                 sbio->page_count = 0;
431                 sbio->work.func = scrub_bio_end_io_worker;
432
433                 if (i != SCRUB_BIOS_PER_SCTX - 1)
434                         sctx->bios[i]->next_free = i + 1;
435                 else
436                         sctx->bios[i]->next_free = -1;
437         }
438         sctx->first_free = 0;
439         sctx->nodesize = dev->dev_root->nodesize;
440         sctx->leafsize = dev->dev_root->leafsize;
441         sctx->sectorsize = dev->dev_root->sectorsize;
442         atomic_set(&sctx->bios_in_flight, 0);
443         atomic_set(&sctx->workers_pending, 0);
444         atomic_set(&sctx->cancel_req, 0);
445         sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
446         INIT_LIST_HEAD(&sctx->csum_list);
447
448         spin_lock_init(&sctx->list_lock);
449         spin_lock_init(&sctx->stat_lock);
450         init_waitqueue_head(&sctx->list_wait);
451
452         ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
453                                  fs_info->dev_replace.tgtdev, is_dev_replace);
454         if (ret) {
455                 scrub_free_ctx(sctx);
456                 return ERR_PTR(ret);
457         }
458         return sctx;
459
460 nomem:
461         scrub_free_ctx(sctx);
462         return ERR_PTR(-ENOMEM);
463 }
464
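/*
 * callback for iterate_extent_inodes(): resolve all paths of the given
 * inode and print one warning line per path, so that the affected
 * files can be identified
 */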
465 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
466                                      void *warn_ctx)
467 {
468         u64 isize;
469         u32 nlink;
470         int ret;
471         int i;
472         struct extent_buffer *eb;
473         struct btrfs_inode_item *inode_item;
474         struct scrub_warning *swarn = warn_ctx;
475         struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
476         struct inode_fs_paths *ipath = NULL;
477         struct btrfs_root *local_root;
478         struct btrfs_key root_key;
479
480         root_key.objectid = root;
481         root_key.type = BTRFS_ROOT_ITEM_KEY;
482         root_key.offset = (u64)-1;
483         local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
484         if (IS_ERR(local_root)) {
485                 ret = PTR_ERR(local_root);
486                 goto err;
487         }
488
489         ret = inode_item_info(inum, 0, local_root, swarn->path);
490         if (ret) {
491                 btrfs_release_path(swarn->path);
492                 goto err;
493         }
494
495         eb = swarn->path->nodes[0];
496         inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
497                                         struct btrfs_inode_item);
498         isize = btrfs_inode_size(eb, inode_item);
499         nlink = btrfs_inode_nlink(eb, inode_item);
500         btrfs_release_path(swarn->path);
501
502         ipath = init_ipath(4096, local_root, swarn->path);
503         if (IS_ERR(ipath)) {
504                 ret = PTR_ERR(ipath);
505                 ipath = NULL;
506                 goto err;
507         }
508         ret = paths_from_inode(inum, ipath);
509
510         if (ret < 0)
511                 goto err;
512
513         /*
514          * we deliberately ignore the fact that ipath might have been too
515          * small to hold all of the paths here
516          */
517         for (i = 0; i < ipath->fspath->elem_cnt; ++i)
518                 printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
519                         "%s, sector %llu, root %llu, inode %llu, offset %llu, "
520                         "length %llu, links %u (path: %s)\n", swarn->errstr,
521                         swarn->logical, rcu_str_deref(swarn->dev->name),
522                         (unsigned long long)swarn->sector, root, inum, offset,
523                         min(isize - offset, (u64)PAGE_SIZE), nlink,
524                         (char *)(unsigned long)ipath->fspath->val[i]);
525
526         free_ipath(ipath);
527         return 0;
528
529 err:
530         printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
531                 "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
532                 "resolving failed with ret=%d\n", swarn->errstr,
533                 swarn->logical, rcu_str_deref(swarn->dev->name),
534                 (unsigned long long)swarn->sector, root, inum, offset, ret);
535
536         free_ipath(ipath);
537         return 0;
538 }
539
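/*
 * look up the extent item that covers the bad block and print warnings:
 * tree backrefs for a metadata block, or the paths of all files that
 * reference a data extent (via scrub_print_warning_inode)
 */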
540 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
541 {
542         struct btrfs_device *dev;
543         struct btrfs_fs_info *fs_info;
544         struct btrfs_path *path;
545         struct btrfs_key found_key;
546         struct extent_buffer *eb;
547         struct btrfs_extent_item *ei;
548         struct scrub_warning swarn;
549         unsigned long ptr = 0;
550         u64 extent_item_pos;
551         u64 flags = 0;
552         u64 ref_root;
553         u32 item_size;
554         u8 ref_level;
555         const int bufsize = 4096;
556         int ret;
557
558         WARN_ON(sblock->page_count < 1);
559         dev = sblock->pagev[0]->dev;
560         fs_info = sblock->sctx->dev_root->fs_info;
561
562         path = btrfs_alloc_path();
563
564         swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
565         swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
566         swarn.sector = (sblock->pagev[0]->physical) >> 9;
567         swarn.logical = sblock->pagev[0]->logical;
568         swarn.errstr = errstr;
569         swarn.dev = NULL;
570         swarn.msg_bufsize = bufsize;
571         swarn.scratch_bufsize = bufsize;
572
573         if (!path || !swarn.scratch_buf || !swarn.msg_buf)
574                 goto out;
575
576         ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
577                                   &flags);
578         if (ret < 0)
579                 goto out;
580
581         extent_item_pos = swarn.logical - found_key.objectid;
582         swarn.extent_item_size = found_key.offset;
583
584         eb = path->nodes[0];
585         ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
586         item_size = btrfs_item_size_nr(eb, path->slots[0]);
587
588         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
589                 do {
590                         ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
591                                                         &ref_root, &ref_level);
592                         printk_in_rcu(KERN_WARNING
593                                 "BTRFS: %s at logical %llu on dev %s, "
594                                 "sector %llu: metadata %s (level %d) in tree "
595                                 "%llu\n", errstr, swarn.logical,
596                                 rcu_str_deref(dev->name),
597                                 (unsigned long long)swarn.sector,
598                                 ref_level ? "node" : "leaf",
599                                 ret < 0 ? -1 : ref_level,
600                                 ret < 0 ? -1 : ref_root);
601                 } while (ret != 1);
602                 btrfs_release_path(path);
603         } else {
604                 btrfs_release_path(path);
605                 swarn.path = path;
606                 swarn.dev = dev;
607                 iterate_extent_inodes(fs_info, found_key.objectid,
608                                         extent_item_pos, 1,
609                                         scrub_print_warning_inode, &swarn);
610         }
611
612 out:
613         btrfs_free_path(path);
614         kfree(swarn.scratch_buf);
615         kfree(swarn.msg_buf);
616 }
617
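/*
 * callback for iterate_inodes_from_logical(): re-read one page of the
 * inode through the page cache. If the page is already uptodate (and
 * clean), repair_io_failure() rewrites the bad sector directly;
 * otherwise the range is marked EXTENT_DAMAGED and read from the bad
 * mirror so that the generic read path performs the repair.
 */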
618 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
619 {
620         struct page *page = NULL;
621         unsigned long index;
622         struct scrub_fixup_nodatasum *fixup = fixup_ctx;
623         int ret;
624         int corrected = 0;
625         struct btrfs_key key;
626         struct inode *inode = NULL;
627         struct btrfs_fs_info *fs_info;
628         u64 end = offset + PAGE_SIZE - 1;
629         struct btrfs_root *local_root;
630         int srcu_index;
631
632         key.objectid = root;
633         key.type = BTRFS_ROOT_ITEM_KEY;
634         key.offset = (u64)-1;
635
636         fs_info = fixup->root->fs_info;
637         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
638
639         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
640         if (IS_ERR(local_root)) {
641                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
642                 return PTR_ERR(local_root);
643         }
644
645         key.type = BTRFS_INODE_ITEM_KEY;
646         key.objectid = inum;
647         key.offset = 0;
648         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
649         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
650         if (IS_ERR(inode))
651                 return PTR_ERR(inode);
652
653         index = offset >> PAGE_CACHE_SHIFT;
654
655         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
656         if (!page) {
657                 ret = -ENOMEM;
658                 goto out;
659         }
660
661         if (PageUptodate(page)) {
662                 if (PageDirty(page)) {
663                         /*
664                          * we need to write the data to the defect sector. the
665                          * data that was in that sector is not in memory,
666                          * because the page was modified. we must not write the
667                          * modified page to that sector.
668                          *
669                          * TODO: what could be done here: wait for the delalloc
670                          *       runner to write out that page (might involve
671                          *       COW) and see whether the sector is still
672                          *       referenced afterwards.
673                          *
674                          * In the meantime, we'll treat this error as
675                          * uncorrectable, although there is a chance that a
676                          * later scrub will find the bad sector again and
677                          * that there's no dirty page in memory by then.
678                          */
679                         ret = -EIO;
680                         goto out;
681                 }
682                 fs_info = BTRFS_I(inode)->root->fs_info;
683                 ret = repair_io_failure(fs_info, offset, PAGE_SIZE,
684                                         fixup->logical, page,
685                                         fixup->mirror_num);
686                 unlock_page(page);
687                 corrected = !ret;
688         } else {
689                 /*
690                  * we need to get good data first. the general readpage path
691                  * will call repair_io_failure for us; we just have to make
692                  * sure we read the bad mirror.
693                  */
694                 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
695                                         EXTENT_DAMAGED, GFP_NOFS);
696                 if (ret) {
697                         /* set_extent_bits should give proper error */
698                         WARN_ON(ret > 0);
699                         if (ret > 0)
700                                 ret = -EFAULT;
701                         goto out;
702                 }
703
704                 ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
705                                                 btrfs_get_extent,
706                                                 fixup->mirror_num);
707                 wait_on_page_locked(page);
708
709                 corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
710                                                 end, EXTENT_DAMAGED, 0, NULL);
711                 if (!corrected)
712                         clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
713                                                 EXTENT_DAMAGED, GFP_NOFS);
714         }
715
716 out:
717         if (page)
718                 put_page(page);
719         if (inode)
720                 iput(inode);
721
722         if (ret < 0)
723                 return ret;
724
725         if (ret == 0 && corrected) {
726                 /*
727                  * we only need to call readpage for one of the inodes belonging
728                  * to this extent. so make iterate_extent_inodes stop
729                  */
730                 return 1;
731         }
732
733         return -EIO;
734 }
735
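/*
 * worker for data blocks without checksum (nodatasum): join a
 * transaction and trigger regular reads of the failed mirror via
 * scrub_fixup_readpage(), so that the normal read-repair path rewrites
 * the sector from a good copy if one exists
 */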
736 static void scrub_fixup_nodatasum(struct btrfs_work *work)
737 {
738         int ret;
739         struct scrub_fixup_nodatasum *fixup;
740         struct scrub_ctx *sctx;
741         struct btrfs_trans_handle *trans = NULL;
742         struct btrfs_path *path;
743         int uncorrectable = 0;
744
745         fixup = container_of(work, struct scrub_fixup_nodatasum, work);
746         sctx = fixup->sctx;
747
748         path = btrfs_alloc_path();
749         if (!path) {
750                 spin_lock(&sctx->stat_lock);
751                 ++sctx->stat.malloc_errors;
752                 spin_unlock(&sctx->stat_lock);
753                 uncorrectable = 1;
754                 goto out;
755         }
756
757         trans = btrfs_join_transaction(fixup->root);
758         if (IS_ERR(trans)) {
759                 uncorrectable = 1;
760                 goto out;
761         }
762
763         /*
764          * the idea is to trigger a regular read through the standard path. we
765          * read a page from the (failed) logical address by specifying the
766          * corresponding copynum of the failed sector. thus, that readpage is
767          * expected to fail.
768          * that is the point where on-the-fly error correction will kick in
769          * (once it's finished) and rewrite the failed sector if a good copy
770          * can be found.
771          */
772         ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
773                                                 path, scrub_fixup_readpage,
774                                                 fixup);
775         if (ret < 0) {
776                 uncorrectable = 1;
777                 goto out;
778         }
779         WARN_ON(ret != 1);
780
781         spin_lock(&sctx->stat_lock);
782         ++sctx->stat.corrected_errors;
783         spin_unlock(&sctx->stat_lock);
784
785 out:
786         if (trans && !IS_ERR(trans))
787                 btrfs_end_transaction(trans, fixup->root);
788         if (uncorrectable) {
789                 spin_lock(&sctx->stat_lock);
790                 ++sctx->stat.uncorrectable_errors;
791                 spin_unlock(&sctx->stat_lock);
792                 btrfs_dev_replace_stats_inc(
793                         &sctx->dev_root->fs_info->dev_replace.
794                         num_uncorrectable_read_errors);
795                 printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
796                     "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
797                         fixup->logical, rcu_str_deref(fixup->dev->name));
798         }
799
800         btrfs_free_path(path);
801         kfree(fixup);
802
803         scrub_pending_trans_workers_dec(sctx);
804 }
805
806 /*
807  * scrub_handle_errored_block gets called when either verification of the
808  * pages failed or the bio failed to read, e.g. with EIO. In the latter
809  * case, this function handles all pages in the bio, even though only one
810  * may be bad.
811  * The goal of this function is to repair the errored block by using the
812  * contents of one of the mirrors.
813  */
814 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
815 {
816         struct scrub_ctx *sctx = sblock_to_check->sctx;
817         struct btrfs_device *dev;
818         struct btrfs_fs_info *fs_info;
819         u64 length;
820         u64 logical;
821         u64 generation;
822         unsigned int failed_mirror_index;
823         unsigned int is_metadata;
824         unsigned int have_csum;
825         u8 *csum;
826         struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
827         struct scrub_block *sblock_bad;
828         int ret;
829         int mirror_index;
830         int page_num;
831         int success;
832         static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
833                                       DEFAULT_RATELIMIT_BURST);
834
835         BUG_ON(sblock_to_check->page_count < 1);
836         fs_info = sctx->dev_root->fs_info;
837         if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
838                 /*
839                  * if we find an error in a super block, we just report it.
840                  * They will get written with the next transaction commit
841                  * It will get written with the next transaction commit
842                  */
843                 spin_lock(&sctx->stat_lock);
844                 ++sctx->stat.super_errors;
845                 spin_unlock(&sctx->stat_lock);
846                 return 0;
847         }
848         length = sblock_to_check->page_count * PAGE_SIZE;
849         logical = sblock_to_check->pagev[0]->logical;
850         generation = sblock_to_check->pagev[0]->generation;
851         BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
852         failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
853         is_metadata = !(sblock_to_check->pagev[0]->flags &
854                         BTRFS_EXTENT_FLAG_DATA);
855         have_csum = sblock_to_check->pagev[0]->have_csum;
856         csum = sblock_to_check->pagev[0]->csum;
857         dev = sblock_to_check->pagev[0]->dev;
858
859         if (sctx->is_dev_replace && !is_metadata && !have_csum) {
860                 sblocks_for_recheck = NULL;
861                 goto nodatasum_case;
862         }
863
864         /*
865          * read all mirrors one after the other. This includes
866          * re-reading the extent or metadata block that failed (that was
867          * the reason this fixup code was called) another time,
868          * page by page this time in order to know which pages
869          * caused I/O errors and which ones are good (for all mirrors).
870          * The goal is to handle the situation when more than one
871          * mirror contains I/O errors, but the errors do not
872          * overlap, i.e. the data can be repaired by selecting the
873          * pages from those mirrors without I/O error on the
874          * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
875          * would be that mirror #1 has an I/O error on the first page,
876          * the second page is good, and mirror #2 has an I/O error on
877          * the second page, but the first page is good.
878          * Then the first page of the first mirror can be repaired by
879          * taking the first page of the second mirror, and the
880          * second page of the second mirror can be repaired by
881          * copying the contents of the 2nd page of the 1st mirror.
882          * One more note: if the pages of one mirror contain I/O
883          * errors, the checksum cannot be verified. In order to get
884          * the best data for repairing, the first attempt is to find
885          * a mirror without I/O errors and with a validated checksum.
886          * Only if this is not possible, the pages are picked from
887          * mirrors with I/O errors without considering the checksum.
888          * If the latter is the case, at the end, the checksum of the
889          * repaired area is verified in order to correctly maintain
890          * the statistics.
891          */
892
893         sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
894                                      sizeof(*sblocks_for_recheck),
895                                      GFP_NOFS);
896         if (!sblocks_for_recheck) {
897                 spin_lock(&sctx->stat_lock);
898                 sctx->stat.malloc_errors++;
899                 sctx->stat.read_errors++;
900                 sctx->stat.uncorrectable_errors++;
901                 spin_unlock(&sctx->stat_lock);
902                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
903                 goto out;
904         }
905
906         /* setup the context, map the logical blocks and alloc the pages */
907         ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
908                                         logical, sblocks_for_recheck);
909         if (ret) {
910                 spin_lock(&sctx->stat_lock);
911                 sctx->stat.read_errors++;
912                 sctx->stat.uncorrectable_errors++;
913                 spin_unlock(&sctx->stat_lock);
914                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
915                 goto out;
916         }
917         BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
918         sblock_bad = sblocks_for_recheck + failed_mirror_index;
919
920         /* build and submit the bios for the failed mirror, check checksums */
921         scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
922                             csum, generation, sctx->csum_size);
923
924         if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
925             sblock_bad->no_io_error_seen) {
926                 /*
927                  * the error disappeared after reading page by page, or
928                  * the area was part of a huge bio and other parts of the
929                  * bio caused I/O errors, or the block layer merged several
930                  * read requests into one and the error is caused by a
931                  * different bio (usually one of the two latter cases is
932                  * the cause)
933                  */
934                 spin_lock(&sctx->stat_lock);
935                 sctx->stat.unverified_errors++;
936                 spin_unlock(&sctx->stat_lock);
937
938                 if (sctx->is_dev_replace)
939                         scrub_write_block_to_dev_replace(sblock_bad);
940                 goto out;
941         }
942
943         if (!sblock_bad->no_io_error_seen) {
944                 spin_lock(&sctx->stat_lock);
945                 sctx->stat.read_errors++;
946                 spin_unlock(&sctx->stat_lock);
947                 if (__ratelimit(&_rs))
948                         scrub_print_warning("i/o error", sblock_to_check);
949                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
950         } else if (sblock_bad->checksum_error) {
951                 spin_lock(&sctx->stat_lock);
952                 sctx->stat.csum_errors++;
953                 spin_unlock(&sctx->stat_lock);
954                 if (__ratelimit(&_rs))
955                         scrub_print_warning("checksum error", sblock_to_check);
956                 btrfs_dev_stat_inc_and_print(dev,
957                                              BTRFS_DEV_STAT_CORRUPTION_ERRS);
958         } else if (sblock_bad->header_error) {
959                 spin_lock(&sctx->stat_lock);
960                 sctx->stat.verify_errors++;
961                 spin_unlock(&sctx->stat_lock);
962                 if (__ratelimit(&_rs))
963                         scrub_print_warning("checksum/header error",
964                                             sblock_to_check);
965                 if (sblock_bad->generation_error)
966                         btrfs_dev_stat_inc_and_print(dev,
967                                 BTRFS_DEV_STAT_GENERATION_ERRS);
968                 else
969                         btrfs_dev_stat_inc_and_print(dev,
970                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
971         }
972
973         if (sctx->readonly) {
974                 ASSERT(!sctx->is_dev_replace);
975                 goto out;
976         }
977
978         if (!is_metadata && !have_csum) {
979                 struct scrub_fixup_nodatasum *fixup_nodatasum;
980
981 nodatasum_case:
982                 WARN_ON(sctx->is_dev_replace);
983
984                 /*
985                  * !is_metadata and !have_csum, this means that the data
986                  * might not be COW'ed and that it might be modified
987                  * concurrently. The general strategy to work on the
988                  * commit root does not help in the case when COW is not
989                  * used.
990                  */
991                 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
992                 if (!fixup_nodatasum)
993                         goto did_not_correct_error;
994                 fixup_nodatasum->sctx = sctx;
995                 fixup_nodatasum->dev = dev;
996                 fixup_nodatasum->logical = logical;
997                 fixup_nodatasum->root = fs_info->extent_root;
998                 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
999                 scrub_pending_trans_workers_inc(sctx);
1000                 fixup_nodatasum->work.func = scrub_fixup_nodatasum;
1001                 btrfs_queue_worker(&fs_info->scrub_workers,
1002                                    &fixup_nodatasum->work);
1003                 goto out;
1004         }
1005
1006         /*
1007          * now build and submit the bios for the other mirrors, check
1008          * checksums.
1009          * First try to pick the mirror which is completely without I/O
1010          * errors and also does not have a checksum error.
1011          * If one is found, and if a checksum is present, the full block
1012          * that is known to contain an error is rewritten. Afterwards
1013          * the block is known to be corrected.
1014          * If a mirror is found which is completely correct, and no
1015          * checksum is present, only those pages are rewritten that had
1016          * an I/O error in the block to be repaired, since it cannot be
1017          * determined which copy of the other pages is better (and it
1018          * could happen otherwise that a correct page would be
1019          * overwritten by a bad one).
1020          */
1021         for (mirror_index = 0;
1022              mirror_index < BTRFS_MAX_MIRRORS &&
1023              sblocks_for_recheck[mirror_index].page_count > 0;
1024              mirror_index++) {
1025                 struct scrub_block *sblock_other;
1026
1027                 if (mirror_index == failed_mirror_index)
1028                         continue;
1029                 sblock_other = sblocks_for_recheck + mirror_index;
1030
1031                 /* build and submit the bios, check checksums */
1032                 scrub_recheck_block(fs_info, sblock_other, is_metadata,
1033                                     have_csum, csum, generation,
1034                                     sctx->csum_size);
1035
1036                 if (!sblock_other->header_error &&
1037                     !sblock_other->checksum_error &&
1038                     sblock_other->no_io_error_seen) {
1039                         if (sctx->is_dev_replace) {
1040                                 scrub_write_block_to_dev_replace(sblock_other);
1041                         } else {
1042                                 int force_write = is_metadata || have_csum;
1043
1044                                 ret = scrub_repair_block_from_good_copy(
1045                                                 sblock_bad, sblock_other,
1046                                                 force_write);
1047                         }
1048                         if (0 == ret)
1049                                 goto corrected_error;
1050                 }
1051         }
1052
1053         /*
1054          * for dev_replace, pick good pages and write to the target device.
1055          */
1056         if (sctx->is_dev_replace) {
1057                 success = 1;
1058                 for (page_num = 0; page_num < sblock_bad->page_count;
1059                      page_num++) {
1060                         int sub_success;
1061
1062                         sub_success = 0;
1063                         for (mirror_index = 0;
1064                              mirror_index < BTRFS_MAX_MIRRORS &&
1065                              sblocks_for_recheck[mirror_index].page_count > 0;
1066                              mirror_index++) {
1067                                 struct scrub_block *sblock_other =
1068                                         sblocks_for_recheck + mirror_index;
1069                                 struct scrub_page *page_other =
1070                                         sblock_other->pagev[page_num];
1071
1072                                 if (!page_other->io_error) {
1073                                         ret = scrub_write_page_to_dev_replace(
1074                                                         sblock_other, page_num);
1075                                         if (ret == 0) {
1076                                                 /* succeeded for this page */
1077                                                 sub_success = 1;
1078                                                 break;
1079                                         } else {
1080                                                 btrfs_dev_replace_stats_inc(
1081                                                         &sctx->dev_root->
1082                                                         fs_info->dev_replace.
1083                                                         num_write_errors);
1084                                         }
1085                                 }
1086                         }
1087
1088                         if (!sub_success) {
1089                                 /*
1090                                  * did not find a mirror to fetch the page
1091                                  * from. scrub_write_page_to_dev_replace()
1092                                  * handles this case (page->io_error), by
1093                                  * filling the block with zeros before
1094                                  * submitting the write request
1095                                  */
1096                                 success = 0;
1097                                 ret = scrub_write_page_to_dev_replace(
1098                                                 sblock_bad, page_num);
1099                                 if (ret)
1100                                         btrfs_dev_replace_stats_inc(
1101                                                 &sctx->dev_root->fs_info->
1102                                                 dev_replace.num_write_errors);
1103                         }
1104                 }
1105
1106                 goto out;
1107         }
1108
1109         /*
1110          * for regular scrub, repair those pages that are errored.
1111          * In case of I/O errors in the area that is supposed to be
1112          * repaired, continue by picking good copies of those pages.
1113          * Select the good pages from mirrors to rewrite bad pages from
1114          * the area to fix. Afterwards verify the checksum of the block
1115          * that is supposed to be repaired. This verification step is
1116          * only done for the purpose of statistics counting and for the
1117          * final scrub report on whether errors remain.
1118          * A perfect algorithm could make use of the checksum and try
1119          * all possible combinations of pages from the different mirrors
1120          * until the checksum verification succeeds. For example, when
1121          * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1122          * of mirror #2 is readable but the final checksum test fails,
1123          * then the 2nd page of mirror #3 could be tried, to see whether
1124          * the final checksum now succeeds. But this would be a rare
1125          * exception and is therefore not implemented. At least the
1126          * good copy is never overwritten this way.
1127          * A more useful improvement would be to pick the sectors
1128          * without I/O error based on sector sizes (512 bytes on legacy
1129          * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
1130          * mirror could be repaired by taking 512 bytes of a different
1131          * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1132          * area are unreadable.
1133          */
1134
1135         /* can only fix I/O errors from here on */
1136         if (sblock_bad->no_io_error_seen)
1137                 goto did_not_correct_error;
1138
1139         success = 1;
1140         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1141                 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1142
1143                 if (!page_bad->io_error)
1144                         continue;
1145
1146                 for (mirror_index = 0;
1147                      mirror_index < BTRFS_MAX_MIRRORS &&
1148                      sblocks_for_recheck[mirror_index].page_count > 0;
1149                      mirror_index++) {
1150                         struct scrub_block *sblock_other = sblocks_for_recheck +
1151                                                            mirror_index;
1152                         struct scrub_page *page_other = sblock_other->pagev[
1153                                                         page_num];
1154
1155                         if (!page_other->io_error) {
1156                                 ret = scrub_repair_page_from_good_copy(
1157                                         sblock_bad, sblock_other, page_num, 0);
1158                                 if (0 == ret) {
1159                                         page_bad->io_error = 0;
1160                                         break; /* succeeded for this page */
1161                                 }
1162                         }
1163                 }
1164
1165                 if (page_bad->io_error) {
1166                         /* did not find a mirror to copy the page from */
1167                         success = 0;
1168                 }
1169         }
1170
1171         if (success) {
1172                 if (is_metadata || have_csum) {
1173                         /*
1174                          * need to verify the checksum now that all
1175                          * sectors on disk are repaired (the write
1176                          * request for data to be repaired is on its way).
1177                          * Just be lazy and use scrub_recheck_block()
1178                          * which re-reads the data before the checksum
1179                          * is verified, but most likely the data comes out
1180                          * of the page cache.
1181                          */
1182                         scrub_recheck_block(fs_info, sblock_bad,
1183                                             is_metadata, have_csum, csum,
1184                                             generation, sctx->csum_size);
1185                         if (!sblock_bad->header_error &&
1186                             !sblock_bad->checksum_error &&
1187                             sblock_bad->no_io_error_seen)
1188                                 goto corrected_error;
1189                         else
1190                                 goto did_not_correct_error;
1191                 } else {
1192 corrected_error:
1193                         spin_lock(&sctx->stat_lock);
1194                         sctx->stat.corrected_errors++;
1195                         spin_unlock(&sctx->stat_lock);
1196                         printk_ratelimited_in_rcu(KERN_ERR
1197                                 "BTRFS: fixed up error at logical %llu on dev %s\n",
1198                                 logical, rcu_str_deref(dev->name));
1199                 }
1200         } else {
1201 did_not_correct_error:
1202                 spin_lock(&sctx->stat_lock);
1203                 sctx->stat.uncorrectable_errors++;
1204                 spin_unlock(&sctx->stat_lock);
1205                 printk_ratelimited_in_rcu(KERN_ERR
1206                         "BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
1207                         logical, rcu_str_deref(dev->name));
1208         }
1209
1210 out:
1211         if (sblocks_for_recheck) {
1212                 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1213                      mirror_index++) {
1214                         struct scrub_block *sblock = sblocks_for_recheck +
1215                                                      mirror_index;
1216                         int page_index;
1217
1218                         for (page_index = 0; page_index < sblock->page_count;
1219                              page_index++) {
1220                                 sblock->pagev[page_index]->sblock = NULL;
1221                                 scrub_page_put(sblock->pagev[page_index]);
1222                         }
1223                 }
1224                 kfree(sblocks_for_recheck);
1225         }
1226
1227         return 0;
1228 }
1229
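/*
 * for each PAGE_SIZE piece of the block, map the logical address to all
 * mirrors and allocate one scrub_page per mirror, so that every mirror
 * ends up with its own scrub_block in sblocks_for_recheck
 */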
1230 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
1231                                      struct btrfs_fs_info *fs_info,
1232                                      struct scrub_block *original_sblock,
1233                                      u64 length, u64 logical,
1234                                      struct scrub_block *sblocks_for_recheck)
1235 {
1236         int page_index;
1237         int mirror_index;
1238         int ret;
1239
1240         /*
1241          * note: the two members ref_count and outstanding_pages
1242          * are not used (and not set) in the blocks that are used for
1243          * the recheck procedure
1244          */
1245
1246         page_index = 0;
1247         while (length > 0) {
1248                 u64 sublen = min_t(u64, length, PAGE_SIZE);
1249                 u64 mapped_length = sublen;
1250                 struct btrfs_bio *bbio = NULL;
1251
1252                 /*
1253                  * with a length of PAGE_SIZE, each returned stripe
1254                  * represents one mirror
1255                  */
1256                 ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical,
1257                                       &mapped_length, &bbio, 0);
1258                 if (ret || !bbio || mapped_length < sublen) {
1259                         kfree(bbio);
1260                         return -EIO;
1261                 }
1262
1263                 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
1264                 for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
1265                      mirror_index++) {
1266                         struct scrub_block *sblock;
1267                         struct scrub_page *page;
1268
1269                         if (mirror_index >= BTRFS_MAX_MIRRORS)
1270                                 continue;
1271
1272                         sblock = sblocks_for_recheck + mirror_index;
1273                         sblock->sctx = sctx;
1274                         page = kzalloc(sizeof(*page), GFP_NOFS);
1275                         if (!page) {
1276 leave_nomem:
1277                                 spin_lock(&sctx->stat_lock);
1278                                 sctx->stat.malloc_errors++;
1279                                 spin_unlock(&sctx->stat_lock);
1280                                 kfree(bbio);
1281                                 return -ENOMEM;
1282                         }
1283                         scrub_page_get(page);
1284                         sblock->pagev[page_index] = page;
1285                         page->logical = logical;
1286                         page->physical = bbio->stripes[mirror_index].physical;
1287                         BUG_ON(page_index >= original_sblock->page_count);
1288                         page->physical_for_dev_replace =
1289                                 original_sblock->pagev[page_index]->
1290                                 physical_for_dev_replace;
1291                         /* for missing devices, dev->bdev is NULL */
1292                         page->dev = bbio->stripes[mirror_index].dev;
1293                         page->mirror_num = mirror_index + 1;
1294                         sblock->page_count++;
1295                         page->page = alloc_page(GFP_NOFS);
1296                         if (!page->page)
1297                                 goto leave_nomem;
1298                 }
1299                 kfree(bbio);
1300                 length -= sublen;
1301                 logical += sublen;
1302                 page_index++;
1303         }
1304
1305         return 0;
1306 }
1307
1308 /*
1309  * this function checks the on disk data for checksum errors, header
1310  * errors and read I/O errors. If any I/O error happens, the exact pages
1311  * that failed are marked as bad. The goal is to enable scrub to take the
1312  * good pages from all the mirrors so that the pages that are bad in the
1313  * currently handled mirror can be repaired.
1314  */
1315 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1316                                 struct scrub_block *sblock, int is_metadata,
1317                                 int have_csum, u8 *csum, u64 generation,
1318                                 u16 csum_size)
1319 {
1320         int page_num;
1321
1322         sblock->no_io_error_seen = 1;
1323         sblock->header_error = 0;
1324         sblock->checksum_error = 0;
1325
1326         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1327                 struct bio *bio;
1328                 struct scrub_page *page = sblock->pagev[page_num];
1329
1330                 if (page->dev->bdev == NULL) {
1331                         page->io_error = 1;
1332                         sblock->no_io_error_seen = 0;
1333                         continue;
1334                 }
1335
1336                 WARN_ON(!page->page);
1337                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1338                 if (!bio) {
1339                         page->io_error = 1;
1340                         sblock->no_io_error_seen = 0;
1341                         continue;
1342                 }
1343                 bio->bi_bdev = page->dev->bdev;
1344                 bio->bi_sector = page->physical >> 9;
1345
1346                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1347                 if (btrfsic_submit_bio_wait(READ, bio))
1348                         sblock->no_io_error_seen = 0;
1349
1350                 bio_put(bio);
1351         }
1352
1353         if (sblock->no_io_error_seen)
1354                 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1355                                              have_csum, csum, generation,
1356                                              csum_size);
1357
1358         return;
1359 }
1360
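     /*
      * verify a block that has already been read into memory: for metadata,
      * the header fields (bytenr, fsid, chunk tree uuid, generation) are
      * checked in addition to the checksum; the results are recorded in the
      * header_error, generation_error and checksum_error flags of the block.
      */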
1361 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1362                                          struct scrub_block *sblock,
1363                                          int is_metadata, int have_csum,
1364                                          const u8 *csum, u64 generation,
1365                                          u16 csum_size)
1366 {
1367         int page_num;
1368         u8 calculated_csum[BTRFS_CSUM_SIZE];
1369         u32 crc = ~(u32)0;
1370         void *mapped_buffer;
1371
1372         WARN_ON(!sblock->pagev[0]->page);
1373         if (is_metadata) {
1374                 struct btrfs_header *h;
1375
1376                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1377                 h = (struct btrfs_header *)mapped_buffer;
1378
1379                 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
1380                     memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
1381                     memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1382                            BTRFS_UUID_SIZE)) {
1383                         sblock->header_error = 1;
1384                 } else if (generation != btrfs_stack_header_generation(h)) {
1385                         sblock->header_error = 1;
1386                         sblock->generation_error = 1;
1387                 }
1388                 csum = h->csum;
1389         } else {
1390                 if (!have_csum)
1391                         return;
1392
1393                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1394         }
1395
1396         for (page_num = 0;;) {
1397                 if (page_num == 0 && is_metadata)
1398                         crc = btrfs_csum_data(
1399                                 ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1400                                 crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1401                 else
1402                         crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
1403
1404                 kunmap_atomic(mapped_buffer);
1405                 page_num++;
1406                 if (page_num >= sblock->page_count)
1407                         break;
1408                 WARN_ON(!sblock->pagev[page_num]->page);
1409
1410                 mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
1411         }
1412
1413         btrfs_csum_final(crc, calculated_csum);
1414         if (memcmp(calculated_csum, csum, csum_size))
1415                 sblock->checksum_error = 1;
1416 }
1417
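     /*
      * copy the pages of a good mirror over the corresponding pages of the
      * bad block. With force_write, or when the bad block has a header or
      * checksum error, every page is rewritten; otherwise only the pages
      * that saw an I/O error are rewritten.
      */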
1418 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1419                                              struct scrub_block *sblock_good,
1420                                              int force_write)
1421 {
1422         int page_num;
1423         int ret = 0;
1424
1425         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1426                 int ret_sub;
1427
1428                 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1429                                                            sblock_good,
1430                                                            page_num,
1431                                                            force_write);
1432                 if (ret_sub)
1433                         ret = ret_sub;
1434         }
1435
1436         return ret;
1437 }
1438
1439 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1440                                             struct scrub_block *sblock_good,
1441                                             int page_num, int force_write)
1442 {
1443         struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1444         struct scrub_page *page_good = sblock_good->pagev[page_num];
1445
1446         BUG_ON(page_bad->page == NULL);
1447         BUG_ON(page_good->page == NULL);
1448         if (force_write || sblock_bad->header_error ||
1449             sblock_bad->checksum_error || page_bad->io_error) {
1450                 struct bio *bio;
1451                 int ret;
1452
1453                 if (!page_bad->dev->bdev) {
1454                         printk_ratelimited(KERN_WARNING "BTRFS: "
1455                                 "scrub_repair_page_from_good_copy(bdev == NULL) "
1456                                 "is unexpected!\n");
1457                         return -EIO;
1458                 }
1459
1460                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1461                 if (!bio)
1462                         return -EIO;
1463                 bio->bi_bdev = page_bad->dev->bdev;
1464                 bio->bi_sector = page_bad->physical >> 9;
1465
1466                 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1467                 if (PAGE_SIZE != ret) {
1468                         bio_put(bio);
1469                         return -EIO;
1470                 }
1471
1472                 if (btrfsic_submit_bio_wait(WRITE, bio)) {
1473                         btrfs_dev_stat_inc_and_print(page_bad->dev,
1474                                 BTRFS_DEV_STAT_WRITE_ERRS);
1475                         btrfs_dev_replace_stats_inc(
1476                                 &sblock_bad->sctx->dev_root->fs_info->
1477                                 dev_replace.num_write_errors);
1478                         bio_put(bio);
1479                         return -EIO;
1480                 }
1481                 bio_put(bio);
1482         }
1483
1484         return 0;
1485 }
1486
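     /*
      * queue all pages of a block for writing to the dev-replace target;
      * pages that cannot be queued are accounted in the dev_replace
      * num_write_errors statistic.
      */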
1487 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1488 {
1489         int page_num;
1490
1491         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1492                 int ret;
1493
1494                 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1495                 if (ret)
1496                         btrfs_dev_replace_stats_inc(
1497                                 &sblock->sctx->dev_root->fs_info->dev_replace.
1498                                 num_write_errors);
1499         }
1500 }
1501
1502 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1503                                            int page_num)
1504 {
1505         struct scrub_page *spage = sblock->pagev[page_num];
1506
1507         BUG_ON(spage->page == NULL);
1508         if (spage->io_error) {
1509                 void *mapped_buffer = kmap_atomic(spage->page);
1510
1511                 memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
1512                 flush_dcache_page(spage->page);
1513                 kunmap_atomic(mapped_buffer);
1514         }
1515         return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1516 }
1517
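     /*
      * add a single page to the current write bio for the dev-replace
      * target. Pages are batched until the bio is full or the next page is
      * not physically/logically contiguous, at which point the bio is
      * submitted and a new one is started.
      */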
1518 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1519                                     struct scrub_page *spage)
1520 {
1521         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1522         struct scrub_bio *sbio;
1523         int ret;
1524
1525         mutex_lock(&wr_ctx->wr_lock);
1526 again:
1527         if (!wr_ctx->wr_curr_bio) {
1528                 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1529                                               GFP_NOFS);
1530                 if (!wr_ctx->wr_curr_bio) {
1531                         mutex_unlock(&wr_ctx->wr_lock);
1532                         return -ENOMEM;
1533                 }
1534                 wr_ctx->wr_curr_bio->sctx = sctx;
1535                 wr_ctx->wr_curr_bio->page_count = 0;
1536         }
1537         sbio = wr_ctx->wr_curr_bio;
1538         if (sbio->page_count == 0) {
1539                 struct bio *bio;
1540
1541                 sbio->physical = spage->physical_for_dev_replace;
1542                 sbio->logical = spage->logical;
1543                 sbio->dev = wr_ctx->tgtdev;
1544                 bio = sbio->bio;
1545                 if (!bio) {
1546                         bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
1547                         if (!bio) {
1548                                 mutex_unlock(&wr_ctx->wr_lock);
1549                                 return -ENOMEM;
1550                         }
1551                         sbio->bio = bio;
1552                 }
1553
1554                 bio->bi_private = sbio;
1555                 bio->bi_end_io = scrub_wr_bio_end_io;
1556                 bio->bi_bdev = sbio->dev->bdev;
1557                 bio->bi_sector = sbio->physical >> 9;
1558                 sbio->err = 0;
1559         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1560                    spage->physical_for_dev_replace ||
1561                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1562                    spage->logical) {
1563                 scrub_wr_submit(sctx);
1564                 goto again;
1565         }
1566
1567         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1568         if (ret != PAGE_SIZE) {
1569                 if (sbio->page_count < 1) {
1570                         bio_put(sbio->bio);
1571                         sbio->bio = NULL;
1572                         mutex_unlock(&wr_ctx->wr_lock);
1573                         return -EIO;
1574                 }
1575                 scrub_wr_submit(sctx);
1576                 goto again;
1577         }
1578
1579         sbio->pagev[sbio->page_count] = spage;
1580         scrub_page_get(spage);
1581         sbio->page_count++;
1582         if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1583                 scrub_wr_submit(sctx);
1584         mutex_unlock(&wr_ctx->wr_lock);
1585
1586         return 0;
1587 }
1588
1589 static void scrub_wr_submit(struct scrub_ctx *sctx)
1590 {
1591         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1592         struct scrub_bio *sbio;
1593
1594         if (!wr_ctx->wr_curr_bio)
1595                 return;
1596
1597         sbio = wr_ctx->wr_curr_bio;
1598         wr_ctx->wr_curr_bio = NULL;
1599         WARN_ON(!sbio->bio->bi_bdev);
1600         scrub_pending_bio_inc(sctx);
1601         /* process all writes in a single worker thread. Then the block layer
1602          * orders the requests before sending them to the driver, which
1603          * doubled the write performance on spinning disks when measured
1604          * with Linux 3.5 */
1605         btrfsic_submit_bio(WRITE, sbio->bio);
1606 }
1607
1608 static void scrub_wr_bio_end_io(struct bio *bio, int err)
1609 {
1610         struct scrub_bio *sbio = bio->bi_private;
1611         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1612
1613         sbio->err = err;
1614         sbio->bio = bio;
1615
1616         sbio->work.func = scrub_wr_bio_end_io_worker;
1617         btrfs_queue_worker(&fs_info->scrub_wr_completion_workers, &sbio->work);
1618 }
1619
1620 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1621 {
1622         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1623         struct scrub_ctx *sctx = sbio->sctx;
1624         int i;
1625
1626         WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1627         if (sbio->err) {
1628                 struct btrfs_dev_replace *dev_replace =
1629                         &sbio->sctx->dev_root->fs_info->dev_replace;
1630
1631                 for (i = 0; i < sbio->page_count; i++) {
1632                         struct scrub_page *spage = sbio->pagev[i];
1633
1634                         spage->io_error = 1;
1635                         btrfs_dev_replace_stats_inc(&dev_replace->
1636                                                     num_write_errors);
1637                 }
1638         }
1639
1640         for (i = 0; i < sbio->page_count; i++)
1641                 scrub_page_put(sbio->pagev[i]);
1642
1643         bio_put(sbio->bio);
1644         kfree(sbio);
1645         scrub_pending_bio_dec(sctx);
1646 }
1647
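     /*
      * dispatch the checksum verification based on the extent flags of the
      * block (data, tree block or super block). A verification failure
      * hands the block over to scrub_handle_errored_block(); for super
      * blocks the errors are only counted, not repaired.
      */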
1648 static int scrub_checksum(struct scrub_block *sblock)
1649 {
1650         u64 flags;
1651         int ret;
1652
1653         WARN_ON(sblock->page_count < 1);
1654         flags = sblock->pagev[0]->flags;
1655         ret = 0;
1656         if (flags & BTRFS_EXTENT_FLAG_DATA)
1657                 ret = scrub_checksum_data(sblock);
1658         else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1659                 ret = scrub_checksum_tree_block(sblock);
1660         else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1661                 (void)scrub_checksum_super(sblock);
1662         else
1663                 WARN_ON(1);
1664         if (ret)
1665                 scrub_handle_errored_block(sblock);
1666
1667         return ret;
1668 }
1669
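     /*
      * recompute the checksum of a data block (sctx->sectorsize bytes,
      * possibly spanning several pages) and compare it against the csum
      * attached to the first page. Returns 1 on mismatch, 0 when the
      * checksum matches or when no checksum is available.
      */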
1670 static int scrub_checksum_data(struct scrub_block *sblock)
1671 {
1672         struct scrub_ctx *sctx = sblock->sctx;
1673         u8 csum[BTRFS_CSUM_SIZE];
1674         u8 *on_disk_csum;
1675         struct page *page;
1676         void *buffer;
1677         u32 crc = ~(u32)0;
1678         int fail = 0;
1679         u64 len;
1680         int index;
1681
1682         BUG_ON(sblock->page_count < 1);
1683         if (!sblock->pagev[0]->have_csum)
1684                 return 0;
1685
1686         on_disk_csum = sblock->pagev[0]->csum;
1687         page = sblock->pagev[0]->page;
1688         buffer = kmap_atomic(page);
1689
1690         len = sctx->sectorsize;
1691         index = 0;
1692         for (;;) {
1693                 u64 l = min_t(u64, len, PAGE_SIZE);
1694
1695                 crc = btrfs_csum_data(buffer, crc, l);
1696                 kunmap_atomic(buffer);
1697                 len -= l;
1698                 if (len == 0)
1699                         break;
1700                 index++;
1701                 BUG_ON(index >= sblock->page_count);
1702                 BUG_ON(!sblock->pagev[index]->page);
1703                 page = sblock->pagev[index]->page;
1704                 buffer = kmap_atomic(page);
1705         }
1706
1707         btrfs_csum_final(crc, csum);
1708         if (memcmp(csum, on_disk_csum, sctx->csum_size))
1709                 fail = 1;
1710
1711         return fail;
1712 }
1713
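     /*
      * verify a tree block: the header fields (bytenr, generation, fsid,
      * chunk tree uuid) are compared against the expected values and the
      * checksum is recomputed over the whole node. Any mismatch makes the
      * function return nonzero.
      */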
1714 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1715 {
1716         struct scrub_ctx *sctx = sblock->sctx;
1717         struct btrfs_header *h;
1718         struct btrfs_root *root = sctx->dev_root;
1719         struct btrfs_fs_info *fs_info = root->fs_info;
1720         u8 calculated_csum[BTRFS_CSUM_SIZE];
1721         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1722         struct page *page;
1723         void *mapped_buffer;
1724         u64 mapped_size;
1725         void *p;
1726         u32 crc = ~(u32)0;
1727         int fail = 0;
1728         int crc_fail = 0;
1729         u64 len;
1730         int index;
1731
1732         BUG_ON(sblock->page_count < 1);
1733         page = sblock->pagev[0]->page;
1734         mapped_buffer = kmap_atomic(page);
1735         h = (struct btrfs_header *)mapped_buffer;
1736         memcpy(on_disk_csum, h->csum, sctx->csum_size);
1737
1738         /*
1739          * we don't use the getter functions here, as we
1740          * a) don't have an extent buffer and
1741          * b) the page is already kmapped
1742          */
1743
1744         if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
1745                 ++fail;
1746
1747         if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
1748                 ++fail;
1749
1750         if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1751                 ++fail;
1752
1753         if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1754                    BTRFS_UUID_SIZE))
1755                 ++fail;
1756
1757         WARN_ON(sctx->nodesize != sctx->leafsize);
1758         len = sctx->nodesize - BTRFS_CSUM_SIZE;
1759         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1760         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1761         index = 0;
1762         for (;;) {
1763                 u64 l = min_t(u64, len, mapped_size);
1764
1765                 crc = btrfs_csum_data(p, crc, l);
1766                 kunmap_atomic(mapped_buffer);
1767                 len -= l;
1768                 if (len == 0)
1769                         break;
1770                 index++;
1771                 BUG_ON(index >= sblock->page_count);
1772                 BUG_ON(!sblock->pagev[index]->page);
1773                 page = sblock->pagev[index]->page;
1774                 mapped_buffer = kmap_atomic(page);
1775                 mapped_size = PAGE_SIZE;
1776                 p = mapped_buffer;
1777         }
1778
1779         btrfs_csum_final(crc, calculated_csum);
1780         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1781                 ++crc_fail;
1782
1783         return fail || crc_fail;
1784 }
1785
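     /*
      * verify a super block: bytenr, generation, fsid and the checksum are
      * checked; the return value is the number of failed checks.
      */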
1786 static int scrub_checksum_super(struct scrub_block *sblock)
1787 {
1788         struct btrfs_super_block *s;
1789         struct scrub_ctx *sctx = sblock->sctx;
1790         struct btrfs_root *root = sctx->dev_root;
1791         struct btrfs_fs_info *fs_info = root->fs_info;
1792         u8 calculated_csum[BTRFS_CSUM_SIZE];
1793         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1794         struct page *page;
1795         void *mapped_buffer;
1796         u64 mapped_size;
1797         void *p;
1798         u32 crc = ~(u32)0;
1799         int fail_gen = 0;
1800         int fail_cor = 0;
1801         u64 len;
1802         int index;
1803
1804         BUG_ON(sblock->page_count < 1);
1805         page = sblock->pagev[0]->page;
1806         mapped_buffer = kmap_atomic(page);
1807         s = (struct btrfs_super_block *)mapped_buffer;
1808         memcpy(on_disk_csum, s->csum, sctx->csum_size);
1809
1810         if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
1811                 ++fail_cor;
1812
1813         if (sblock->pagev[0]->generation != btrfs_super_generation(s))
1814                 ++fail_gen;
1815
1816         if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1817                 ++fail_cor;
1818
1819         len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1820         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1821         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1822         index = 0;
1823         for (;;) {
1824                 u64 l = min_t(u64, len, mapped_size);
1825
1826                 crc = btrfs_csum_data(p, crc, l);
1827                 kunmap_atomic(mapped_buffer);
1828                 len -= l;
1829                 if (len == 0)
1830                         break;
1831                 index++;
1832                 BUG_ON(index >= sblock->page_count);
1833                 BUG_ON(!sblock->pagev[index]->page);
1834                 page = sblock->pagev[index]->page;
1835                 mapped_buffer = kmap_atomic(page);
1836                 mapped_size = PAGE_SIZE;
1837                 p = mapped_buffer;
1838         }
1839
1840         btrfs_csum_final(crc, calculated_csum);
1841         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1842                 ++fail_cor;
1843
1844         if (fail_cor + fail_gen) {
1845                 /*
1846                  * if we find an error in a super block, we just report it.
1847                  * Super blocks get rewritten with the next transaction
1848                  * commit anyway.
1849                  */
1850                 spin_lock(&sctx->stat_lock);
1851                 ++sctx->stat.super_errors;
1852                 spin_unlock(&sctx->stat_lock);
1853                 if (fail_cor)
1854                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1855                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1856                 else
1857                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1858                                 BTRFS_DEV_STAT_GENERATION_ERRS);
1859         }
1860
1861         return fail_cor + fail_gen;
1862 }
1863
1864 static void scrub_block_get(struct scrub_block *sblock)
1865 {
1866         atomic_inc(&sblock->ref_count);
1867 }
1868
1869 static void scrub_block_put(struct scrub_block *sblock)
1870 {
1871         if (atomic_dec_and_test(&sblock->ref_count)) {
1872                 int i;
1873
1874                 for (i = 0; i < sblock->page_count; i++)
1875                         scrub_page_put(sblock->pagev[i]);
1876                 kfree(sblock);
1877         }
1878 }
1879
1880 static void scrub_page_get(struct scrub_page *spage)
1881 {
1882         atomic_inc(&spage->ref_count);
1883 }
1884
1885 static void scrub_page_put(struct scrub_page *spage)
1886 {
1887         if (atomic_dec_and_test(&spage->ref_count)) {
1888                 if (spage->page)
1889                         __free_page(spage->page);
1890                 kfree(spage);
1891         }
1892 }
1893
1894 static void scrub_submit(struct scrub_ctx *sctx)
1895 {
1896         struct scrub_bio *sbio;
1897
1898         if (sctx->curr == -1)
1899                 return;
1900
1901         sbio = sctx->bios[sctx->curr];
1902         sctx->curr = -1;
1903         scrub_pending_bio_inc(sctx);
1904
1905         if (!sbio->bio->bi_bdev) {
1906                 /*
1907                  * this case should not happen. If btrfs_map_block() is
1908                  * wrong, it could happen for dev-replace operations on
1909                  * missing devices when no mirrors are available, but in
1910                  * that case the mount should already have failed.
1911                  * This case is handled correctly (but _very_ slowly).
1912                  */
1913                 printk_ratelimited(KERN_WARNING
1914                         "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
1915                 bio_endio(sbio->bio, -EIO);
1916         } else {
1917                 btrfsic_submit_bio(READ, sbio->bio);
1918         }
1919 }
1920
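     /*
      * add a single page to the current read bio, waiting for a free
      * scrub_bio slot if necessary. The bio is submitted once it is full
      * or the next page is not contiguous on disk or belongs to a
      * different device.
      */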
1921 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
1922                                     struct scrub_page *spage)
1923 {
1924         struct scrub_block *sblock = spage->sblock;
1925         struct scrub_bio *sbio;
1926         int ret;
1927
1928 again:
1929         /*
1930          * grab a fresh bio or wait for one to become available
1931          */
1932         while (sctx->curr == -1) {
1933                 spin_lock(&sctx->list_lock);
1934                 sctx->curr = sctx->first_free;
1935                 if (sctx->curr != -1) {
1936                         sctx->first_free = sctx->bios[sctx->curr]->next_free;
1937                         sctx->bios[sctx->curr]->next_free = -1;
1938                         sctx->bios[sctx->curr]->page_count = 0;
1939                         spin_unlock(&sctx->list_lock);
1940                 } else {
1941                         spin_unlock(&sctx->list_lock);
1942                         wait_event(sctx->list_wait, sctx->first_free != -1);
1943                 }
1944         }
1945         sbio = sctx->bios[sctx->curr];
1946         if (sbio->page_count == 0) {
1947                 struct bio *bio;
1948
1949                 sbio->physical = spage->physical;
1950                 sbio->logical = spage->logical;
1951                 sbio->dev = spage->dev;
1952                 bio = sbio->bio;
1953                 if (!bio) {
1954                         bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
1955                         if (!bio)
1956                                 return -ENOMEM;
1957                         sbio->bio = bio;
1958                 }
1959
1960                 bio->bi_private = sbio;
1961                 bio->bi_end_io = scrub_bio_end_io;
1962                 bio->bi_bdev = sbio->dev->bdev;
1963                 bio->bi_sector = sbio->physical >> 9;
1964                 sbio->err = 0;
1965         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1966                    spage->physical ||
1967                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1968                    spage->logical ||
1969                    sbio->dev != spage->dev) {
1970                 scrub_submit(sctx);
1971                 goto again;
1972         }
1973
1974         sbio->pagev[sbio->page_count] = spage;
1975         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1976         if (ret != PAGE_SIZE) {
1977                 if (sbio->page_count < 1) {
1978                         bio_put(sbio->bio);
1979                         sbio->bio = NULL;
1980                         return -EIO;
1981                 }
1982                 scrub_submit(sctx);
1983                 goto again;
1984         }
1985
1986         scrub_block_get(sblock); /* one for the page added to the bio */
1987         atomic_inc(&sblock->outstanding_pages);
1988         sbio->page_count++;
1989         if (sbio->page_count == sctx->pages_per_rd_bio)
1990                 scrub_submit(sctx);
1991
1992         return 0;
1993 }
1994
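     /*
      * split the range [logical, logical + len) into PAGE_SIZE scrub pages,
      * attach the optional checksum and queue every page for reading via
      * scrub_add_page_to_rd_bio(). With @force, the resulting bio is
      * submitted immediately.
      */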
1995 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
1996                        u64 physical, struct btrfs_device *dev, u64 flags,
1997                        u64 gen, int mirror_num, u8 *csum, int force,
1998                        u64 physical_for_dev_replace)
1999 {
2000         struct scrub_block *sblock;
2001         int index;
2002
2003         sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2004         if (!sblock) {
2005                 spin_lock(&sctx->stat_lock);
2006                 sctx->stat.malloc_errors++;
2007                 spin_unlock(&sctx->stat_lock);
2008                 return -ENOMEM;
2009         }
2010
2011         /* one ref inside this function, plus one for each page added to
2012          * a bio later on */
2013         atomic_set(&sblock->ref_count, 1);
2014         sblock->sctx = sctx;
2015         sblock->no_io_error_seen = 1;
2016
2017         for (index = 0; len > 0; index++) {
2018                 struct scrub_page *spage;
2019                 u64 l = min_t(u64, len, PAGE_SIZE);
2020
2021                 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2022                 if (!spage) {
2023 leave_nomem:
2024                         spin_lock(&sctx->stat_lock);
2025                         sctx->stat.malloc_errors++;
2026                         spin_unlock(&sctx->stat_lock);
2027                         scrub_block_put(sblock);
2028                         return -ENOMEM;
2029                 }
2030                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2031                 scrub_page_get(spage);
2032                 sblock->pagev[index] = spage;
2033                 spage->sblock = sblock;
2034                 spage->dev = dev;
2035                 spage->flags = flags;
2036                 spage->generation = gen;
2037                 spage->logical = logical;
2038                 spage->physical = physical;
2039                 spage->physical_for_dev_replace = physical_for_dev_replace;
2040                 spage->mirror_num = mirror_num;
2041                 if (csum) {
2042                         spage->have_csum = 1;
2043                         memcpy(spage->csum, csum, sctx->csum_size);
2044                 } else {
2045                         spage->have_csum = 0;
2046                 }
2047                 sblock->page_count++;
2048                 spage->page = alloc_page(GFP_NOFS);
2049                 if (!spage->page)
2050                         goto leave_nomem;
2051                 len -= l;
2052                 logical += l;
2053                 physical += l;
2054                 physical_for_dev_replace += l;
2055         }
2056
2057         WARN_ON(sblock->page_count == 0);
2058         for (index = 0; index < sblock->page_count; index++) {
2059                 struct scrub_page *spage = sblock->pagev[index];
2060                 int ret;
2061
2062                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2063                 if (ret) {
2064                         scrub_block_put(sblock);
2065                         return ret;
2066                 }
2067         }
2068
2069         if (force)
2070                 scrub_submit(sctx);
2071
2072         /* last one frees, either here or in bio completion for last page */
2073         scrub_block_put(sblock);
2074         return 0;
2075 }
2076
2077 static void scrub_bio_end_io(struct bio *bio, int err)
2078 {
2079         struct scrub_bio *sbio = bio->bi_private;
2080         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
2081
2082         sbio->err = err;
2083         sbio->bio = bio;
2084
2085         btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
2086 }
2087
2088 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2089 {
2090         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2091         struct scrub_ctx *sctx = sbio->sctx;
2092         int i;
2093
2094         BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2095         if (sbio->err) {
2096                 for (i = 0; i < sbio->page_count; i++) {
2097                         struct scrub_page *spage = sbio->pagev[i];
2098
2099                         spage->io_error = 1;
2100                         spage->sblock->no_io_error_seen = 0;
2101                 }
2102         }
2103
2104         /* now complete the scrub_block items that have all pages completed */
2105         for (i = 0; i < sbio->page_count; i++) {
2106                 struct scrub_page *spage = sbio->pagev[i];
2107                 struct scrub_block *sblock = spage->sblock;
2108
2109                 if (atomic_dec_and_test(&sblock->outstanding_pages))
2110                         scrub_block_complete(sblock);
2111                 scrub_block_put(sblock);
2112         }
2113
2114         bio_put(sbio->bio);
2115         sbio->bio = NULL;
2116         spin_lock(&sctx->list_lock);
2117         sbio->next_free = sctx->first_free;
2118         sctx->first_free = sbio->index;
2119         spin_unlock(&sctx->list_lock);
2120
2121         if (sctx->is_dev_replace &&
2122             atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2123                 mutex_lock(&sctx->wr_ctx.wr_lock);
2124                 scrub_wr_submit(sctx);
2125                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2126         }
2127
2128         scrub_pending_bio_dec(sctx);
2129 }
2130
2131 static void scrub_block_complete(struct scrub_block *sblock)
2132 {
2133         if (!sblock->no_io_error_seen) {
2134                 scrub_handle_errored_block(sblock);
2135         } else {
2136                 /*
2137                  * if the block has a checksum error, the repair mechanism
2138                  * takes care of writing it in the dev replace case;
2139                  * otherwise write it to the replace target here.
2140                  */
2141                 if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace)
2142                         scrub_write_block_to_dev_replace(sblock);
2143         }
2144 }
2145
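     /*
      * look up the data checksum for @logical in sctx->csum_list. Sums that
      * lie entirely before @logical are dropped from the list. Returns 1
      * and copies the checksum into @csum when one is found, 0 otherwise.
      */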
2146 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
2147                            u8 *csum)
2148 {
2149         struct btrfs_ordered_sum *sum = NULL;
2150         unsigned long index;
2151         unsigned long num_sectors;
2152
2153         while (!list_empty(&sctx->csum_list)) {
2154                 sum = list_first_entry(&sctx->csum_list,
2155                                        struct btrfs_ordered_sum, list);
2156                 if (sum->bytenr > logical)
2157                         return 0;
2158                 if (sum->bytenr + sum->len > logical)
2159                         break;
2160
2161                 ++sctx->stat.csum_discards;
2162                 list_del(&sum->list);
2163                 kfree(sum);
2164                 sum = NULL;
2165         }
2166         if (!sum)
2167                 return 0;
2168
2169         index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
2170         num_sectors = sum->len / sctx->sectorsize;
2171         memcpy(csum, sum->sums + index, sctx->csum_size);
2172         if (index == num_sectors - 1) {
2173                 list_del(&sum->list);
2174                 kfree(sum);
2175         }
2176         return 1;
2177 }
2178
2179 /* scrub extent tries to collect up to 64 kB for each bio */
2180 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2181                         u64 physical, struct btrfs_device *dev, u64 flags,
2182                         u64 gen, int mirror_num, u64 physical_for_dev_replace)
2183 {
2184         int ret;
2185         u8 csum[BTRFS_CSUM_SIZE];
2186         u32 blocksize;
2187
2188         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2189                 blocksize = sctx->sectorsize;
2190                 spin_lock(&sctx->stat_lock);
2191                 sctx->stat.data_extents_scrubbed++;
2192                 sctx->stat.data_bytes_scrubbed += len;
2193                 spin_unlock(&sctx->stat_lock);
2194         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2195                 WARN_ON(sctx->nodesize != sctx->leafsize);
2196                 blocksize = sctx->nodesize;
2197                 spin_lock(&sctx->stat_lock);
2198                 sctx->stat.tree_extents_scrubbed++;
2199                 sctx->stat.tree_bytes_scrubbed += len;
2200                 spin_unlock(&sctx->stat_lock);
2201         } else {
2202                 blocksize = sctx->sectorsize;
2203                 WARN_ON(1);
2204         }
2205
2206         while (len) {
2207                 u64 l = min_t(u64, len, blocksize);
2208                 int have_csum = 0;
2209
2210                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2211                         /* push csums to sbio */
2212                         have_csum = scrub_find_csum(sctx, logical, l, csum);
2213                         if (have_csum == 0)
2214                                 ++sctx->stat.no_csum;
2215                         if (sctx->is_dev_replace && !have_csum) {
2216                                 ret = copy_nocow_pages(sctx, logical, l,
2217                                                        mirror_num,
2218                                                       physical_for_dev_replace);
2219                                 goto behind_scrub_pages;
2220                         }
2221                 }
2222                 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2223                                   mirror_num, have_csum ? csum : NULL, 0,
2224                                   physical_for_dev_replace);
2225 behind_scrub_pages:
2226                 if (ret)
2227                         return ret;
2228                 len -= l;
2229                 logical += l;
2230                 physical += l;
2231                 physical_for_dev_replace += l;
2232         }
2233         return 0;
2234 }
2235
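     /*
      * scrub one stripe of a chunk: walk the extent tree on the commit
      * root, collect the data checksums for the stripe and scrub every
      * extent that falls into it, honoring cancel and pause requests along
      * the way.
      */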
2236 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2237                                            struct map_lookup *map,
2238                                            struct btrfs_device *scrub_dev,
2239                                            int num, u64 base, u64 length,
2240                                            int is_dev_replace)
2241 {
2242         struct btrfs_path *path;
2243         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2244         struct btrfs_root *root = fs_info->extent_root;
2245         struct btrfs_root *csum_root = fs_info->csum_root;
2246         struct btrfs_extent_item *extent;
2247         struct blk_plug plug;
2248         u64 flags;
2249         int ret;
2250         int slot;
2251         u64 nstripes;
2252         struct extent_buffer *l;
2253         struct btrfs_key key;
2254         u64 physical;
2255         u64 logical;
2256         u64 logic_end;
2257         u64 generation;
2258         int mirror_num;
2259         struct reada_control *reada1;
2260         struct reada_control *reada2;
2261         struct btrfs_key key_start;
2262         struct btrfs_key key_end;
2263         u64 increment = map->stripe_len;
2264         u64 offset;
2265         u64 extent_logical;
2266         u64 extent_physical;
2267         u64 extent_len;
2268         struct btrfs_device *extent_dev;
2269         int extent_mirror_num;
2270         int stop_loop;
2271
2272         if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2273                          BTRFS_BLOCK_GROUP_RAID6)) {
2274                 if (num >= nr_data_stripes(map)) {
2275                         return 0;
2276                 }
2277         }
2278
2279         nstripes = length;
2280         offset = 0;
2281         do_div(nstripes, map->stripe_len);
2282         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
2283                 offset = map->stripe_len * num;
2284                 increment = map->stripe_len * map->num_stripes;
2285                 mirror_num = 1;
2286         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2287                 int factor = map->num_stripes / map->sub_stripes;
2288                 offset = map->stripe_len * (num / map->sub_stripes);
2289                 increment = map->stripe_len * factor;
2290                 mirror_num = num % map->sub_stripes + 1;
2291         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
2292                 increment = map->stripe_len;
2293                 mirror_num = num % map->num_stripes + 1;
2294         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2295                 increment = map->stripe_len;
2296                 mirror_num = num % map->num_stripes + 1;
2297         } else {
2298                 increment = map->stripe_len;
2299                 mirror_num = 1;
2300         }
2301
2302         path = btrfs_alloc_path();
2303         if (!path)
2304                 return -ENOMEM;
2305
2306         /*
2307          * work on commit root. The related disk blocks are static as
2308          * long as COW is applied. This means it is safe to rewrite
2309          * them to repair disk errors without any race conditions.
2310          */
2311         path->search_commit_root = 1;
2312         path->skip_locking = 1;
2313
2314         /*
2315          * trigger the readahead for the extent tree and the csum tree and
2316          * wait for completion. During readahead, the scrub is officially
2317          * paused so that it does not hold off transaction commits.
2318          */
2319         logical = base + offset;
2320
2321         wait_event(sctx->list_wait,
2322                    atomic_read(&sctx->bios_in_flight) == 0);
2323         scrub_blocked_if_needed(fs_info);
2324
2325         /* FIXME it might be better to start readahead at commit root */
2326         key_start.objectid = logical;
2327         key_start.type = BTRFS_EXTENT_ITEM_KEY;
2328         key_start.offset = (u64)0;
2329         key_end.objectid = base + offset + nstripes * increment;
2330         key_end.type = BTRFS_METADATA_ITEM_KEY;
2331         key_end.offset = (u64)-1;
2332         reada1 = btrfs_reada_add(root, &key_start, &key_end);
2333
2334         key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
2335         key_start.type = BTRFS_EXTENT_CSUM_KEY;
2336         key_start.offset = logical;
2337         key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
2338         key_end.type = BTRFS_EXTENT_CSUM_KEY;
2339         key_end.offset = base + offset + nstripes * increment;
2340         reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
2341
2342         if (!IS_ERR(reada1))
2343                 btrfs_reada_wait(reada1);
2344         if (!IS_ERR(reada2))
2345                 btrfs_reada_wait(reada2);
2346
2347
2348         /*
2349          * collect all data csums for the stripe to avoid seeking during
2350          * the scrub. This might currently (crc32) end up being about 1MB.
2351          */
2352         blk_start_plug(&plug);
2353
2354         /*
2355          * now find all extents for each stripe and scrub them
2356          */
2357         logical = base + offset;
2358         physical = map->stripes[num].physical;
2359         logic_end = logical + increment * nstripes;
2360         ret = 0;
2361         while (logical < logic_end) {
2362                 /*
2363                  * canceled?
2364                  */
2365                 if (atomic_read(&fs_info->scrub_cancel_req) ||
2366                     atomic_read(&sctx->cancel_req)) {
2367                         ret = -ECANCELED;
2368                         goto out;
2369                 }
2370                 /*
2371                  * check to see if we have to pause
2372                  */
2373                 if (atomic_read(&fs_info->scrub_pause_req)) {
2374                         /* push queued extents */
2375                         atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
2376                         scrub_submit(sctx);
2377                         mutex_lock(&sctx->wr_ctx.wr_lock);
2378                         scrub_wr_submit(sctx);
2379                         mutex_unlock(&sctx->wr_ctx.wr_lock);
2380                         wait_event(sctx->list_wait,
2381                                    atomic_read(&sctx->bios_in_flight) == 0);
2382                         atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
2383                         scrub_blocked_if_needed(fs_info);
2384                 }
2385
2386                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2387                         key.type = BTRFS_METADATA_ITEM_KEY;
2388                 else
2389                         key.type = BTRFS_EXTENT_ITEM_KEY;
2390                 key.objectid = logical;
2391                 key.offset = (u64)-1;
2392
2393                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2394                 if (ret < 0)
2395                         goto out;
2396
2397                 if (ret > 0) {
2398                         ret = btrfs_previous_extent_item(root, path, 0);
2399                         if (ret < 0)
2400                                 goto out;
2401                         if (ret > 0) {
2402                                 /* there's no smaller item, so stick with the
2403                                  * larger one */
2404                                 btrfs_release_path(path);
2405                                 ret = btrfs_search_slot(NULL, root, &key,
2406                                                         path, 0, 0);
2407                                 if (ret < 0)
2408                                         goto out;
2409                         }
2410                 }
2411
2412                 stop_loop = 0;
2413                 while (1) {
2414                         u64 bytes;
2415
2416                         l = path->nodes[0];
2417                         slot = path->slots[0];
2418                         if (slot >= btrfs_header_nritems(l)) {
2419                                 ret = btrfs_next_leaf(root, path);
2420                                 if (ret == 0)
2421                                         continue;
2422                                 if (ret < 0)
2423                                         goto out;
2424
2425                                 stop_loop = 1;
2426                                 break;
2427                         }
2428                         btrfs_item_key_to_cpu(l, &key, slot);
2429
2430                         if (key.type == BTRFS_METADATA_ITEM_KEY)
2431                                 bytes = root->leafsize;
2432                         else
2433                                 bytes = key.offset;
2434
2435                         if (key.objectid + bytes <= logical)
2436                                 goto next;
2437
2438                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2439                             key.type != BTRFS_METADATA_ITEM_KEY)
2440                                 goto next;
2441
2442                         if (key.objectid >= logical + map->stripe_len) {
2443                                 /* out of this device extent */
2444                                 if (key.objectid >= logic_end)
2445                                         stop_loop = 1;
2446                                 break;
2447                         }
2448
2449                         extent = btrfs_item_ptr(l, slot,
2450                                                 struct btrfs_extent_item);
2451                         flags = btrfs_extent_flags(l, extent);
2452                         generation = btrfs_extent_generation(l, extent);
2453
2454                         if (key.objectid < logical &&
2455                             (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
2456                                 btrfs_err(fs_info,
2457                                            "scrub: tree block %llu spanning "
2458                                            "stripes, ignored. logical=%llu",
2459                                        key.objectid, logical);
2460                                 goto next;
2461                         }
2462
2463 again:
2464                         extent_logical = key.objectid;
2465                         extent_len = bytes;
2466
2467                         /*
2468                          * trim extent to this stripe
2469                          */
2470                         if (extent_logical < logical) {
2471                                 extent_len -= logical - extent_logical;
2472                                 extent_logical = logical;
2473                         }
2474                         if (extent_logical + extent_len >
2475                             logical + map->stripe_len) {
2476                                 extent_len = logical + map->stripe_len -
2477                                              extent_logical;
2478                         }
2479
2480                         extent_physical = extent_logical - logical + physical;
2481                         extent_dev = scrub_dev;
2482                         extent_mirror_num = mirror_num;
2483                         if (is_dev_replace)
2484                                 scrub_remap_extent(fs_info, extent_logical,
2485                                                    extent_len, &extent_physical,
2486                                                    &extent_dev,
2487                                                    &extent_mirror_num);
2488
2489                         ret = btrfs_lookup_csums_range(csum_root, logical,
2490                                                 logical + map->stripe_len - 1,
2491                                                 &sctx->csum_list, 1);
2492                         if (ret)
2493                                 goto out;
2494
2495                         ret = scrub_extent(sctx, extent_logical, extent_len,
2496                                            extent_physical, extent_dev, flags,
2497                                            generation, extent_mirror_num,
2498                                            extent_logical - logical + physical);
2499                         if (ret)
2500                                 goto out;
2501
2502                         scrub_free_csums(sctx);
2503                         if (extent_logical + extent_len <
2504                             key.objectid + bytes) {
2505                                 logical += increment;
2506                                 physical += map->stripe_len;
2507
2508                                 if (logical < key.objectid + bytes) {
2509                                         cond_resched();
2510                                         goto again;
2511                                 }
2512
2513                                 if (logical >= logic_end) {
2514                                         stop_loop = 1;
2515                                         break;
2516                                 }
2517                         }
2518 next:
2519                         path->slots[0]++;
2520                 }
2521                 btrfs_release_path(path);
2522                 logical += increment;
2523                 physical += map->stripe_len;
2524                 spin_lock(&sctx->stat_lock);
2525                 if (stop_loop)
2526                         sctx->stat.last_physical = map->stripes[num].physical +
2527                                                    length;
2528                 else
2529                         sctx->stat.last_physical = physical;
2530                 spin_unlock(&sctx->stat_lock);
2531                 if (stop_loop)
2532                         break;
2533         }
2534 out:
2535         /* push queued extents */
2536         scrub_submit(sctx);
2537         mutex_lock(&sctx->wr_ctx.wr_lock);
2538         scrub_wr_submit(sctx);
2539         mutex_unlock(&sctx->wr_ctx.wr_lock);
2540
2541         blk_finish_plug(&plug);
2542         btrfs_free_path(path);
2543         return ret < 0 ? ret : 0;
2544 }
2545
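     /*
      * look up the chunk mapping for @chunk_offset and scrub every stripe
      * of it that is located on @scrub_dev at @dev_offset.
      */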
2546 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2547                                           struct btrfs_device *scrub_dev,
2548                                           u64 chunk_tree, u64 chunk_objectid,
2549                                           u64 chunk_offset, u64 length,
2550                                           u64 dev_offset, int is_dev_replace)
2551 {
2552         struct btrfs_mapping_tree *map_tree =
2553                 &sctx->dev_root->fs_info->mapping_tree;
2554         struct map_lookup *map;
2555         struct extent_map *em;
2556         int i;
2557         int ret = 0;
2558
2559         read_lock(&map_tree->map_tree.lock);
2560         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2561         read_unlock(&map_tree->map_tree.lock);
2562
2563         if (!em)
2564                 return -EINVAL;
2565
2566         map = (struct map_lookup *)em->bdev;
2567         if (em->start != chunk_offset)
2568                 goto out;
2569
2570         if (em->len < length)
2571                 goto out;
2572
2573         for (i = 0; i < map->num_stripes; ++i) {
2574                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2575                     map->stripes[i].physical == dev_offset) {
2576                         ret = scrub_stripe(sctx, map, scrub_dev, i,
2577                                            chunk_offset, length,
2578                                            is_dev_replace);
2579                         if (ret)
2580                                 goto out;
2581                 }
2582         }
2583 out:
2584         free_extent_map(em);
2585
2586         return ret;
2587 }
2588
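     /*
      * walk all dev extents of @scrub_dev in the range [start, end) and
      * scrub the corresponding chunks one by one. Between chunks, all
      * pending read and write bios are flushed and the dev-replace cursor
      * is updated.
      */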
2589 static noinline_for_stack
2590 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2591                            struct btrfs_device *scrub_dev, u64 start, u64 end,
2592                            int is_dev_replace)
2593 {
2594         struct btrfs_dev_extent *dev_extent = NULL;
2595         struct btrfs_path *path;
2596         struct btrfs_root *root = sctx->dev_root;
2597         struct btrfs_fs_info *fs_info = root->fs_info;
2598         u64 length;
2599         u64 chunk_tree;
2600         u64 chunk_objectid;
2601         u64 chunk_offset;
2602         int ret;
2603         int slot;
2604         struct extent_buffer *l;
2605         struct btrfs_key key;
2606         struct btrfs_key found_key;
2607         struct btrfs_block_group_cache *cache;
2608         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2609
2610         path = btrfs_alloc_path();
2611         if (!path)
2612                 return -ENOMEM;
2613
2614         path->reada = 2;
2615         path->search_commit_root = 1;
2616         path->skip_locking = 1;
2617
2618         key.objectid = scrub_dev->devid;
2619         key.offset = 0ull;
2620         key.type = BTRFS_DEV_EXTENT_KEY;
2621
2622         while (1) {
2623                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2624                 if (ret < 0)
2625                         break;
2626                 if (ret > 0) {
2627                         if (path->slots[0] >=
2628                             btrfs_header_nritems(path->nodes[0])) {
2629                                 ret = btrfs_next_leaf(root, path);
2630                                 if (ret)
2631                                         break;
2632                         }
2633                 }
2634
2635                 l = path->nodes[0];
2636                 slot = path->slots[0];
2637
2638                 btrfs_item_key_to_cpu(l, &found_key, slot);
2639
2640                 if (found_key.objectid != scrub_dev->devid)
2641                         break;
2642
2643                 if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
2644                         break;
2645
2646                 if (found_key.offset >= end)
2647                         break;
2648
2649                 if (found_key.offset < key.offset)
2650                         break;
2651
2652                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2653                 length = btrfs_dev_extent_length(l, dev_extent);
2654
2655                 if (found_key.offset + length <= start) {
2656                         key.offset = found_key.offset + length;
2657                         btrfs_release_path(path);
2658                         continue;
2659                 }
2660
2661                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2662                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2663                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2664
2665                 /*
2666                  * get a reference on the corresponding block group to prevent
2667                  * the chunk from going away while we scrub it
2668                  */
2669                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2670                 if (!cache) {
2671                         ret = -ENOENT;
2672                         break;
2673                 }
2674                 dev_replace->cursor_right = found_key.offset + length;
2675                 dev_replace->cursor_left = found_key.offset;
2676                 dev_replace->item_needs_writeback = 1;
2677                 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
2678                                   chunk_offset, length, found_key.offset,
2679                                   is_dev_replace);
2680
2681                 /*
2682                  * Flush and submit all pending read and write bios,
2683                  * then wait for them.
2684                  * Note that in the dev replace case, a read request causes
2685                  * write requests that are submitted in the read completion
2686                  * worker. Therefore, in the current situation, all write
2687                  * requests must be flushed as well, so that all read and
2688                  * write requests are really completed when bios_in_flight
2689                  * drops to 0.
2690                  */
2691                 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
2692                 scrub_submit(sctx);
2693                 mutex_lock(&sctx->wr_ctx.wr_lock);
2694                 scrub_wr_submit(sctx);
2695                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2696
2697                 wait_event(sctx->list_wait,
2698                            atomic_read(&sctx->bios_in_flight) == 0);
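                /*
                 * Account this scrub as paused and wake up the waiters right
                 * away, so that a transaction commit blocked in
                 * btrfs_scrub_pause() is not held up while we wait for the
                 * pending workers below.
                 */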
2699                 atomic_inc(&fs_info->scrubs_paused);
2700                 wake_up(&fs_info->scrub_pause_wait);
2701
2702                 /*
2703                  * This must be done before we decrease @scrubs_paused.
2704                  * It makes sure we don't block transaction commit while
2705                  * we are waiting for pending workers to finish.
2706                  */
2707                 wait_event(sctx->list_wait,
2708                            atomic_read(&sctx->workers_pending) == 0);
2709                 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
2710
2711                 mutex_lock(&fs_info->scrub_lock);
2712                 __scrub_blocked_if_needed(fs_info);
2713                 atomic_dec(&fs_info->scrubs_paused);
2714                 mutex_unlock(&fs_info->scrub_lock);
2715                 wake_up(&fs_info->scrub_pause_wait);
2716
2717                 btrfs_put_block_group(cache);
2718                 if (ret)
2719                         break;
2720                 if (is_dev_replace &&
2721                     atomic64_read(&dev_replace->num_write_errors) > 0) {
2722                         ret = -EIO;
2723                         break;
2724                 }
2725                 if (sctx->stat.malloc_errors > 0) {
2726                         ret = -ENOMEM;
2727                         break;
2728                 }
2729
2730                 dev_replace->cursor_left = dev_replace->cursor_right;
2731                 dev_replace->item_needs_writeback = 1;
2732
2733                 key.offset = found_key.offset + length;
2734                 btrfs_release_path(path);
2735         }
2736
2737         btrfs_free_path(path);
2738
2739         /*
2740          * ret can still be 1 from search_slot or next_leaf;
2741          * that is not an error.
2742          */
2743         return ret < 0 ? ret : 0;
2744 }
2745
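/*
 * Scrub all super block mirrors that fit on @scrub_dev, verifying them
 * against the generation of the last committed transaction, then wait for
 * the submitted bios to complete.
 */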
2746 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2747                                            struct btrfs_device *scrub_dev)
2748 {
2749         int     i;
2750         u64     bytenr;
2751         u64     gen;
2752         int     ret;
2753         struct btrfs_root *root = sctx->dev_root;
2754
2755         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
2756                 return -EIO;
2757
2758         gen = root->fs_info->last_trans_committed;
2759
2760         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2761                 bytenr = btrfs_sb_offset(i);
2762                 if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
2763                         break;
2764
2765                 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
2766                                   scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
2767                                   NULL, 1, bytenr);
2768                 if (ret)
2769                         return ret;
2770         }
2771         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
2772
2773         return 0;
2774 }
2775
2776 /*
2777  * Get a reference count on fs_info->scrub_workers. Start the workers if necessary.
2778  */
2779 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
2780                                                 int is_dev_replace)
2781 {
2782         int ret = 0;
2783
2784         if (fs_info->scrub_workers_refcnt == 0) {
2785                 if (is_dev_replace)
2786                         btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1,
2787                                         &fs_info->generic_worker);
2788                 else
2789                         btrfs_init_workers(&fs_info->scrub_workers, "scrub",
2790                                         fs_info->thread_pool_size,
2791                                         &fs_info->generic_worker);
2792                 fs_info->scrub_workers.idle_thresh = 4;
2793                 ret = btrfs_start_workers(&fs_info->scrub_workers);
2794                 if (ret)
2795                         goto out;
2796                 btrfs_init_workers(&fs_info->scrub_wr_completion_workers,
2797                                    "scrubwrc",
2798                                    fs_info->thread_pool_size,
2799                                    &fs_info->generic_worker);
2800                 fs_info->scrub_wr_completion_workers.idle_thresh = 2;
2801                 ret = btrfs_start_workers(
2802                                 &fs_info->scrub_wr_completion_workers);
2803                 if (ret)
2804                         goto out;
2805                 btrfs_init_workers(&fs_info->scrub_nocow_workers, "scrubnc", 1,
2806                                    &fs_info->generic_worker);
2807                 ret = btrfs_start_workers(&fs_info->scrub_nocow_workers);
2808                 if (ret)
2809                         goto out;
2810         }
2811         ++fs_info->scrub_workers_refcnt;
2812 out:
2813         return ret;
2814 }
2815
2816 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
2817 {
2818         if (--fs_info->scrub_workers_refcnt == 0) {
2819                 btrfs_stop_workers(&fs_info->scrub_workers);
2820                 btrfs_stop_workers(&fs_info->scrub_wr_completion_workers);
2821                 btrfs_stop_workers(&fs_info->scrub_nocow_workers);
2822         }
2823         WARN_ON(fs_info->scrub_workers_refcnt < 0);
2824 }
2825
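/*
 * Entry point for both scrub and device replace (@is_dev_replace): check the
 * size assumptions scrub relies on, take a reference on the scrub workers,
 * set up a scrub context for @devid and scrub the super blocks (scrub only)
 * followed by all of the device's chunks in the range [start, end).
 */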
2826 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2827                     u64 end, struct btrfs_scrub_progress *progress,
2828                     int readonly, int is_dev_replace)
2829 {
2830         struct scrub_ctx *sctx;
2831         int ret;
2832         struct btrfs_device *dev;
2833
2834         if (btrfs_fs_closing(fs_info))
2835                 return -EINVAL;
2836
2837         /*
2838          * check some assumptions
2839          */
2840         if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
2841                 btrfs_err(fs_info,
2842                            "scrub: size assumption nodesize == leafsize (%d == %d) fails",
2843                        fs_info->chunk_root->nodesize,
2844                        fs_info->chunk_root->leafsize);
2845                 return -EINVAL;
2846         }
2847
2848         if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
2849                 /*
2850                  * In this case scrub is unable to calculate the checksum,
2851                  * given the way scrub is implemented. Do not handle this
2852                  * situation at all because it should never happen.
2853                  */
2854                 btrfs_err(fs_info,
2855                            "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
2856                        fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
2857                 return -EINVAL;
2858         }
2859
2860         if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
2861                 /* not supported for data w/o checksums */
2862                 btrfs_err(fs_info,
2863                            "scrub: size assumption sectorsize != PAGE_SIZE "
2864                            "(%d != %lu) fails",
2865                        fs_info->chunk_root->sectorsize, PAGE_SIZE);
2866                 return -EINVAL;
2867         }
2868
2869         if (fs_info->chunk_root->nodesize >
2870             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
2871             fs_info->chunk_root->sectorsize >
2872             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
2873                 /*
2874                  * would exhaust the array bounds of pagev member in
2875                  * struct scrub_block
2876                  */
2877                 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
2878                            "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
2879                        fs_info->chunk_root->nodesize,
2880                        SCRUB_MAX_PAGES_PER_BLOCK,
2881                        fs_info->chunk_root->sectorsize,
2882                        SCRUB_MAX_PAGES_PER_BLOCK);
2883                 return -EINVAL;
2884         }
2885
2886
2887         mutex_lock(&fs_info->fs_devices->device_list_mutex);
2888         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
2889         if (!dev || (dev->missing && !is_dev_replace)) {
2890                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2891                 return -ENODEV;
2892         }
2893
2894         mutex_lock(&fs_info->scrub_lock);
2895         if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
2896                 mutex_unlock(&fs_info->scrub_lock);
2897                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2898                 return -EIO;
2899         }
2900
2901         btrfs_dev_replace_lock(&fs_info->dev_replace);
2902         if (dev->scrub_device ||
2903             (!is_dev_replace &&
2904              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2905                 btrfs_dev_replace_unlock(&fs_info->dev_replace);
2906                 mutex_unlock(&fs_info->scrub_lock);
2907                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2908                 return -EINPROGRESS;
2909         }
2910         btrfs_dev_replace_unlock(&fs_info->dev_replace);
2911
2912         ret = scrub_workers_get(fs_info, is_dev_replace);
2913         if (ret) {
2914                 mutex_unlock(&fs_info->scrub_lock);
2915                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2916                 return ret;
2917         }
2918
2919         sctx = scrub_setup_ctx(dev, is_dev_replace);
2920         if (IS_ERR(sctx)) {
2921                 mutex_unlock(&fs_info->scrub_lock);
2922                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2923                 scrub_workers_put(fs_info);
2924                 return PTR_ERR(sctx);
2925         }
2926         sctx->readonly = readonly;
2927         dev->scrub_device = sctx;
2928         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2929
2930         /*
2931                  * By checking @scrub_pause_req here, we can avoid a
2932                  * race between transaction commit and scrubbing.
2933          */
2934         __scrub_blocked_if_needed(fs_info);
2935         atomic_inc(&fs_info->scrubs_running);
2936         mutex_unlock(&fs_info->scrub_lock);
2937
2938         if (!is_dev_replace) {
2939                 /*
2940                  * Hold the device list mutex so that we do not race
2941                  * with super block writes kicked off by log tree sync.
2942                  */
2943                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2944                 ret = scrub_supers(sctx, dev);
2945                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2946         }
2947
2948         if (!ret)
2949                 ret = scrub_enumerate_chunks(sctx, dev, start, end,
2950                                              is_dev_replace);
2951
2952         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
2953         atomic_dec(&fs_info->scrubs_running);
2954         wake_up(&fs_info->scrub_pause_wait);
2955
2956         wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
2957
2958         if (progress)
2959                 memcpy(progress, &sctx->stat, sizeof(*progress));
2960
2961         mutex_lock(&fs_info->scrub_lock);
2962         dev->scrub_device = NULL;
2963         scrub_workers_put(fs_info);
2964         mutex_unlock(&fs_info->scrub_lock);
2965
2966         scrub_free_ctx(sctx);
2967
2968         return ret;
2969 }
2970
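/*
 * Pause all running scrubs: raise @scrub_pause_req and wait until every
 * running scrub has accounted itself as paused.
 */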
2971 void btrfs_scrub_pause(struct btrfs_root *root)
2972 {
2973         struct btrfs_fs_info *fs_info = root->fs_info;
2974
2975         mutex_lock(&fs_info->scrub_lock);
2976         atomic_inc(&fs_info->scrub_pause_req);
2977         while (atomic_read(&fs_info->scrubs_paused) !=
2978                atomic_read(&fs_info->scrubs_running)) {
2979                 mutex_unlock(&fs_info->scrub_lock);
2980                 wait_event(fs_info->scrub_pause_wait,
2981                            atomic_read(&fs_info->scrubs_paused) ==
2982                            atomic_read(&fs_info->scrubs_running));
2983                 mutex_lock(&fs_info->scrub_lock);
2984         }
2985         mutex_unlock(&fs_info->scrub_lock);
2986 }
2987
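/* Drop @scrub_pause_req again and wake up the paused scrubs. */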
2988 void btrfs_scrub_continue(struct btrfs_root *root)
2989 {
2990         struct btrfs_fs_info *fs_info = root->fs_info;
2991
2992         atomic_dec(&fs_info->scrub_pause_req);
2993         wake_up(&fs_info->scrub_pause_wait);
2994 }
2995
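/*
 * Cancel all running scrubs: raise @scrub_cancel_req and wait until
 * @scrubs_running has dropped to zero. Returns -ENOTCONN if no scrub
 * was running.
 */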
2996 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
2997 {
2998         mutex_lock(&fs_info->scrub_lock);
2999         if (!atomic_read(&fs_info->scrubs_running)) {
3000                 mutex_unlock(&fs_info->scrub_lock);
3001                 return -ENOTCONN;
3002         }
3003
3004         atomic_inc(&fs_info->scrub_cancel_req);
3005         while (atomic_read(&fs_info->scrubs_running)) {
3006                 mutex_unlock(&fs_info->scrub_lock);
3007                 wait_event(fs_info->scrub_pause_wait,
3008                            atomic_read(&fs_info->scrubs_running) == 0);
3009                 mutex_lock(&fs_info->scrub_lock);
3010         }
3011         atomic_dec(&fs_info->scrub_cancel_req);
3012         mutex_unlock(&fs_info->scrub_lock);
3013
3014         return 0;
3015 }
3016
3017 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3018                            struct btrfs_device *dev)
3019 {
3020         struct scrub_ctx *sctx;
3021
3022         mutex_lock(&fs_info->scrub_lock);
3023         sctx = dev->scrub_device;
3024         if (!sctx) {
3025                 mutex_unlock(&fs_info->scrub_lock);
3026                 return -ENOTCONN;
3027         }
3028         atomic_inc(&sctx->cancel_req);
3029         while (dev->scrub_device) {
3030                 mutex_unlock(&fs_info->scrub_lock);
3031                 wait_event(fs_info->scrub_pause_wait,
3032                            dev->scrub_device == NULL);
3033                 mutex_lock(&fs_info->scrub_lock);
3034         }
3035         mutex_unlock(&fs_info->scrub_lock);
3036
3037         return 0;
3038 }
3039
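/*
 * Copy the statistics of the scrub running on @devid into @progress.
 * Returns -ENODEV if the device does not exist and -ENOTCONN if no scrub
 * is running on it.
 */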
3040 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
3041                          struct btrfs_scrub_progress *progress)
3042 {
3043         struct btrfs_device *dev;
3044         struct scrub_ctx *sctx = NULL;
3045
3046         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3047         dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
3048         if (dev)
3049                 sctx = dev->scrub_device;
3050         if (sctx)
3051                 memcpy(progress, &sctx->stat, sizeof(*progress));
3052         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3053
3054         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3055 }
3056
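/*
 * Map @extent_logical to the physical address, device and mirror number of
 * the first stripe returned by btrfs_map_block(). On failure the output
 * parameters are left untouched.
 */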
3057 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
3058                                u64 extent_logical, u64 extent_len,
3059                                u64 *extent_physical,
3060                                struct btrfs_device **extent_dev,
3061                                int *extent_mirror_num)
3062 {
3063         u64 mapped_length;
3064         struct btrfs_bio *bbio = NULL;
3065         int ret;
3066
3067         mapped_length = extent_len;
3068         ret = btrfs_map_block(fs_info, READ, extent_logical,
3069                               &mapped_length, &bbio, 0);
3070         if (ret || !bbio || mapped_length < extent_len ||
3071             !bbio->stripes[0].dev->bdev) {
3072                 kfree(bbio);
3073                 return;
3074         }
3075
3076         *extent_physical = bbio->stripes[0].physical;
3077         *extent_mirror_num = bbio->mirror_num;
3078         *extent_dev = bbio->stripes[0].dev;
3079         kfree(bbio);
3080 }
3081
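/*
 * Set up the write context of @sctx. Only device replace uses the write
 * path, so for a plain scrub this merely initializes the mutex; otherwise
 * the number of pages per write bio is capped by what the target bdev
 * accepts.
 */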
3082 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
3083                               struct scrub_wr_ctx *wr_ctx,
3084                               struct btrfs_fs_info *fs_info,
3085                               struct btrfs_device *dev,
3086                               int is_dev_replace)
3087 {
3088         WARN_ON(wr_ctx->wr_curr_bio != NULL);
3089
3090         mutex_init(&wr_ctx->wr_lock);
3091         wr_ctx->wr_curr_bio = NULL;
3092         if (!is_dev_replace)
3093                 return 0;
3094
3095         WARN_ON(!dev->bdev);
3096         wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
3097                                          bio_get_nr_vecs(dev->bdev));
3098         wr_ctx->tgtdev = dev;
3099         atomic_set(&wr_ctx->flush_all_writes, 0);
3100         return 0;
3101 }
3102
3103 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
3104 {
3105         mutex_lock(&wr_ctx->wr_lock);
3106         kfree(wr_ctx->wr_curr_bio);
3107         wr_ctx->wr_curr_bio = NULL;
3108         mutex_unlock(&wr_ctx->wr_lock);
3109 }
3110
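/*
 * Device replace only: queue a worker that copies the pages backing the
 * extent at @logical through the page cache to @physical_for_dev_replace on
 * the target device. The pending-workers count of @sctx stays raised until
 * the worker has finished.
 */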
3111 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
3112                             int mirror_num, u64 physical_for_dev_replace)
3113 {
3114         struct scrub_copy_nocow_ctx *nocow_ctx;
3115         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
3116
3117         nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
3118         if (!nocow_ctx) {
3119                 spin_lock(&sctx->stat_lock);
3120                 sctx->stat.malloc_errors++;
3121                 spin_unlock(&sctx->stat_lock);
3122                 return -ENOMEM;
3123         }
3124
3125         scrub_pending_trans_workers_inc(sctx);
3126
3127         nocow_ctx->sctx = sctx;
3128         nocow_ctx->logical = logical;
3129         nocow_ctx->len = len;
3130         nocow_ctx->mirror_num = mirror_num;
3131         nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
3132         nocow_ctx->work.func = copy_nocow_pages_worker;
3133         INIT_LIST_HEAD(&nocow_ctx->inodes);
3134         btrfs_queue_worker(&fs_info->scrub_nocow_workers,
3135                            &nocow_ctx->work);
3136
3137         return 0;
3138 }
3139
3140 static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
3141 {
3142         struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
3143         struct scrub_nocow_inode *nocow_inode;
3144
3145         nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
3146         if (!nocow_inode)
3147                 return -ENOMEM;
3148         nocow_inode->inum = inum;
3149         nocow_inode->offset = offset;
3150         nocow_inode->root = root;
3151         list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
3152         return 0;
3153 }
3154
3155 #define COPY_COMPLETE 1
3156
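/*
 * Worker for copy_nocow_pages(): look up all inodes that reference the
 * logical extent and copy their pages to the replace target. If the inode
 * iteration cannot even be set up, the extent is accounted as an
 * uncorrectable read error of the device replace.
 */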
3157 static void copy_nocow_pages_worker(struct btrfs_work *work)
3158 {
3159         struct scrub_copy_nocow_ctx *nocow_ctx =
3160                 container_of(work, struct scrub_copy_nocow_ctx, work);
3161         struct scrub_ctx *sctx = nocow_ctx->sctx;
3162         u64 logical = nocow_ctx->logical;
3163         u64 len = nocow_ctx->len;
3164         int mirror_num = nocow_ctx->mirror_num;
3165         u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3166         int ret;
3167         struct btrfs_trans_handle *trans = NULL;
3168         struct btrfs_fs_info *fs_info;
3169         struct btrfs_path *path;
3170         struct btrfs_root *root;
3171         int not_written = 0;
3172
3173         fs_info = sctx->dev_root->fs_info;
3174         root = fs_info->extent_root;
3175
3176         path = btrfs_alloc_path();
3177         if (!path) {
3178                 spin_lock(&sctx->stat_lock);
3179                 sctx->stat.malloc_errors++;
3180                 spin_unlock(&sctx->stat_lock);
3181                 not_written = 1;
3182                 goto out;
3183         }
3184
3185         trans = btrfs_join_transaction(root);
3186         if (IS_ERR(trans)) {
3187                 not_written = 1;
3188                 goto out;
3189         }
3190
3191         ret = iterate_inodes_from_logical(logical, fs_info, path,
3192                                           record_inode_for_nocow, nocow_ctx);
3193         if (ret != 0 && ret != -ENOENT) {
3194                 btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
3195                         "phys %llu, len %llu, mir %u, ret %d",
3196                         logical, physical_for_dev_replace, len, mirror_num,
3197                         ret);
3198                 not_written = 1;
3199                 goto out;
3200         }
3201
3202         btrfs_end_transaction(trans, root);
3203         trans = NULL;
3204         while (!list_empty(&nocow_ctx->inodes)) {
3205                 struct scrub_nocow_inode *entry;
3206                 entry = list_first_entry(&nocow_ctx->inodes,
3207                                          struct scrub_nocow_inode,
3208                                          list);
3209                 list_del_init(&entry->list);
3210                 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
3211                                                  entry->root, nocow_ctx);
3212                 kfree(entry);
3213                 if (ret == COPY_COMPLETE) {
3214                         ret = 0;
3215                         break;
3216                 } else if (ret) {
3217                         break;
3218                 }
3219         }
3220 out:
3221         while (!list_empty(&nocow_ctx->inodes)) {
3222                 struct scrub_nocow_inode *entry;
3223                 entry = list_first_entry(&nocow_ctx->inodes,
3224                                          struct scrub_nocow_inode,
3225                                          list);
3226                 list_del_init(&entry->list);
3227                 kfree(entry);
3228         }
3229         if (trans && !IS_ERR(trans))
3230                 btrfs_end_transaction(trans, root);
3231         if (not_written)
3232                 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
3233                                             num_uncorrectable_read_errors);
3234
3235         btrfs_free_path(path);
3236         kfree(nocow_ctx);
3237
3238         scrub_pending_trans_workers_dec(sctx);
3239 }
3240
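/*
 * Copy one inode's pages covering the nocow range to the replace target:
 * lock the extent range, skip it if an ordered extent is pending or if the
 * extent no longer maps to @nocow_ctx->logical, otherwise read each page
 * through the page cache and write it out with write_page_nocow().
 * Returns COPY_COMPLETE once the whole range has been copied.
 */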
3241 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
3242                                       struct scrub_copy_nocow_ctx *nocow_ctx)
3243 {
3244         struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
3245         struct btrfs_key key;
3246         struct inode *inode;
3247         struct page *page;
3248         struct btrfs_root *local_root;
3249         struct btrfs_ordered_extent *ordered;
3250         struct extent_map *em;
3251         struct extent_state *cached_state = NULL;
3252         struct extent_io_tree *io_tree;
3253         u64 physical_for_dev_replace;
3254         u64 len = nocow_ctx->len;
3255         u64 lockstart = offset, lockend = offset + len - 1;
3256         unsigned long index;
3257         int srcu_index;
3258         int ret = 0;
3259         int err = 0;
3260
3261         key.objectid = root;
3262         key.type = BTRFS_ROOT_ITEM_KEY;
3263         key.offset = (u64)-1;
3264
3265         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
3266
3267         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
3268         if (IS_ERR(local_root)) {
3269                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
3270                 return PTR_ERR(local_root);
3271         }
3272
3273         key.type = BTRFS_INODE_ITEM_KEY;
3274         key.objectid = inum;
3275         key.offset = 0;
3276         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
3277         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
3278         if (IS_ERR(inode))
3279                 return PTR_ERR(inode);
3280
3281         /* Avoid truncate/dio/punch hole... */
3282         mutex_lock(&inode->i_mutex);
3283         inode_dio_wait(inode);
3284
3285         physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3286         io_tree = &BTRFS_I(inode)->io_tree;
3287
3288         lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
3289         ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
3290         if (ordered) {
3291                 btrfs_put_ordered_extent(ordered);
3292                 goto out_unlock;
3293         }
3294
3295         em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
3296         if (IS_ERR(em)) {
3297                 ret = PTR_ERR(em);
3298                 goto out_unlock;
3299         }
3300
3301         /*
3302          * This extent does not actually cover the logical extent anymore;
3303          * move on to the next inode.
3304          */
3305         if (em->block_start > nocow_ctx->logical ||
3306             em->block_start + em->block_len < nocow_ctx->logical + len) {
3307                 free_extent_map(em);
3308                 goto out_unlock;
3309         }
3310         free_extent_map(em);
3311
3312         while (len >= PAGE_CACHE_SIZE) {
3313                 index = offset >> PAGE_CACHE_SHIFT;
3314 again:
3315                 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
3316                 if (!page) {
3317                         btrfs_err(fs_info, "find_or_create_page() failed");
3318                         ret = -ENOMEM;
3319                         goto out;
3320                 }
3321
3322                 if (PageUptodate(page)) {
3323                         if (PageDirty(page))
3324                                 goto next_page;
3325                 } else {
3326                         ClearPageError(page);
3327                         err = extent_read_full_page_nolock(io_tree, page,
3328                                                            btrfs_get_extent,
3329                                                            nocow_ctx->mirror_num);
3330                         if (err) {
3331                                 ret = err;
3332                                 goto next_page;
3333                         }
3334
3335                         lock_page(page);
3336                         /*
3337                          * If the page has been removed from the page cache,
3338                          * the data on it is meaningless, because it may be
3339                          * stale: the new data may have been written into a
3340                          * new page in the page cache.
3341                          */
3342                         if (page->mapping != inode->i_mapping) {
3343                                 unlock_page(page);
3344                                 page_cache_release(page);
3345                                 goto again;
3346                         }
3347                         if (!PageUptodate(page)) {
3348                                 ret = -EIO;
3349                                 goto next_page;
3350                         }
3351                 }
3352                 err = write_page_nocow(nocow_ctx->sctx,
3353                                        physical_for_dev_replace, page);
3354                 if (err)
3355                         ret = err;
3356 next_page:
3357                 unlock_page(page);
3358                 page_cache_release(page);
3359
3360                 if (ret)
3361                         break;
3362
3363                 offset += PAGE_CACHE_SIZE;
3364                 physical_for_dev_replace += PAGE_CACHE_SIZE;
3365                 len -= PAGE_CACHE_SIZE;
3366         }
3367         ret = COPY_COMPLETE;
3368 out_unlock:
3369         unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
3370                              GFP_NOFS);
3371 out:
3372         mutex_unlock(&inode->i_mutex);
3373         iput(inode);
3374         return ret;
3375 }
3376
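/*
 * Synchronously write one page to @physical_for_dev_replace on the replace
 * target device. A failed submission is accounted in the device's write
 * error statistics and reported as -EIO.
 */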
3377 static int write_page_nocow(struct scrub_ctx *sctx,
3378                             u64 physical_for_dev_replace, struct page *page)
3379 {
3380         struct bio *bio;
3381         struct btrfs_device *dev;
3382         int ret;
3383
3384         dev = sctx->wr_ctx.tgtdev;
3385         if (!dev)
3386                 return -EIO;
3387         if (!dev->bdev) {
3388                 printk_ratelimited(KERN_WARNING
3389                         "BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
3390                 return -EIO;
3391         }
3392         bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
3393         if (!bio) {
3394                 spin_lock(&sctx->stat_lock);
3395                 sctx->stat.malloc_errors++;
3396                 spin_unlock(&sctx->stat_lock);
3397                 return -ENOMEM;
3398         }
3399         bio->bi_size = 0;
3400         bio->bi_sector = physical_for_dev_replace >> 9;
3401         bio->bi_bdev = dev->bdev;
3402         ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
3403         if (ret != PAGE_CACHE_SIZE) {
3404 leave_with_eio:
3405                 bio_put(bio);
3406                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
3407                 return -EIO;
3408         }
3409
3410         if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
3411                 goto leave_with_eio;
3412
3413         bio_put(bio);
3414         return 0;
3415 }